author     Chris Wilson <chris@chris-wilson.co.uk>	2010-12-04 06:30:53 -0500
committer  Chris Wilson <chris@chris-wilson.co.uk>	2010-12-04 19:37:38 -0500
commit     1ec14ad3132702694f2e1a90b30641cf111183b9
tree       98ca9ae91f14ff5d8feed306941ea2c46479e71a /drivers/gpu/drm
parent     340479aac697bc73e225c122a9753d4964eeda3f
drm/i915: Implement GPU semaphores for inter-ring synchronisation on SNB
The bulk of the change is to convert the growing list of rings into an
array so that the relationship between the rings and the semaphore sync
registers can be easily computed.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
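For readers unfamiliar with the scheme: with the rings held in one array indexed by RCS/VCS/BCS, each ring only needs a small per-ring table recording the last seqno it has already synchronised against from every other ring, and the slot (and hence the semaphore mailbox register) for a given signaller/waiter pair follows directly from the two array indices. The fragment below is a minimal, self-contained sketch of that bookkeeping with simplified, hypothetical names and types; it mirrors the idea behind intel_ring_sync_index() and sync_seqno[] in the patch, and is not the driver code itself.

/* Minimal sketch of the array-based semaphore bookkeeping (simplified,
 * not the driver code): each ring remembers, per other ring, the last
 * seqno it has already waited for.
 */
enum sketch_ring_id { SKETCH_RCS = 0, SKETCH_VCS, SKETCH_BCS, SKETCH_NUM_RINGS };

struct sketch_ring {
	enum sketch_ring_id id;
	unsigned int sync_seqno[SKETCH_NUM_RINGS - 1];
};

/* Map a (signaller, waiter) pair onto a compact per-ring slot by counting
 * ring indices modulo the number of rings; the same arithmetic is what
 * lets the semaphore register be computed from the two ring indices.
 */
static int sketch_sync_index(const struct sketch_ring *from,
			     const struct sketch_ring *to)
{
	int idx = to->id - from->id - 1;

	if (idx < 0)
		idx += SKETCH_NUM_RINGS;
	return idx;
}

/* Returns non-zero when 'to' still has to emit a semaphore wait on
 * 'from' for the given seqno; otherwise that ordering is already known.
 */
static int sketch_needs_wait(struct sketch_ring *from,
			     struct sketch_ring *to,
			     unsigned int seqno)
{
	int idx = sketch_sync_index(from, to);

	if (seqno <= from->sync_seqno[idx])
		return 0;

	from->sync_seqno[idx] = seqno;
	return 1;
}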
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c         |  75
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c             |  60
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c             |   6
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h             |  48
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c             |  86
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c  |  94
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c             | 209
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h             |  19
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c        |   4
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c       |   8
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c        |   8
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c     | 415
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h     |  41
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c             |  14
14 files changed, 648 insertions(+), 439 deletions(-)
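One consequence of using hardware semaphores, visible in the i915_gem_execbuffer.c hunk below, is that the GPU cannot cope with the semaphore seqno values wrapping: if the next seqno would compare below a previously recorded sync point, the driver stalls until the GPU is idle so the per-ring counters are back at zero before any further semaphore waits are emitted. A simplified, stand-alone restatement of that guard, with hypothetical helper names rather than the driver code:

/* Simplified restatement of the wrap guard applied before queuing work:
 * a numerically smaller "next" seqno means the counter is about to wrap,
 * so idle the GPU (which lets retirement zero every slot) before relying
 * on semaphores again.  gpu_idle() is a stand-in for the real
 * wait-for-idle path.
 */
static int sketch_guard_seqno_wrap(unsigned int next_seqno,
				   const unsigned int *sync_seqno,
				   int nslots,
				   int (*gpu_idle)(void))
{
	int i, ret;

	for (i = 0; i < nslots; i++) {
		if (next_seqno < sync_seqno[i]) {
			ret = gpu_idle();
			if (ret)
				return ret;
			break;	/* all slots are zero after idling */
		}
	}
	return 0;
}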
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 3c9d4b876865..aedb02157474 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -339,10 +339,10 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 		return ret;

 	count = 0;
-	if (!list_empty(&dev_priv->render_ring.request_list)) {
+	if (!list_empty(&dev_priv->ring[RCS].request_list)) {
 		seq_printf(m, "Render requests:\n");
 		list_for_each_entry(gem_request,
-				    &dev_priv->render_ring.request_list,
+				    &dev_priv->ring[RCS].request_list,
 				    list) {
 			seq_printf(m, " %d @ %d\n",
 				   gem_request->seqno,
@@ -350,10 +350,10 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 		}
 		count++;
 	}
-	if (!list_empty(&dev_priv->bsd_ring.request_list)) {
+	if (!list_empty(&dev_priv->ring[VCS].request_list)) {
 		seq_printf(m, "BSD requests:\n");
 		list_for_each_entry(gem_request,
-				    &dev_priv->bsd_ring.request_list,
+				    &dev_priv->ring[VCS].request_list,
 				    list) {
 			seq_printf(m, " %d @ %d\n",
 				   gem_request->seqno,
@@ -361,10 +361,10 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 		}
 		count++;
 	}
-	if (!list_empty(&dev_priv->blt_ring.request_list)) {
+	if (!list_empty(&dev_priv->ring[BCS].request_list)) {
 		seq_printf(m, "BLT requests:\n");
 		list_for_each_entry(gem_request,
-				    &dev_priv->blt_ring.request_list,
+				    &dev_priv->ring[BCS].request_list,
 				    list) {
 			seq_printf(m, " %d @ %d\n",
 				   gem_request->seqno,
@@ -398,15 +398,14 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
+	int ret, i;

 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;

-	i915_ring_seqno_info(m, &dev_priv->render_ring);
-	i915_ring_seqno_info(m, &dev_priv->bsd_ring);
-	i915_ring_seqno_info(m, &dev_priv->blt_ring);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		i915_ring_seqno_info(m, &dev_priv->ring[i]);

 	mutex_unlock(&dev->struct_mutex);

@@ -419,7 +418,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
+	int ret, i;

 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
@@ -458,9 +457,8 @@
 	}
 	seq_printf(m, "Interrupts received: %d\n",
 		   atomic_read(&dev_priv->irq_received));
-	i915_ring_seqno_info(m, &dev_priv->render_ring);
-	i915_ring_seqno_info(m, &dev_priv->bsd_ring);
-	i915_ring_seqno_info(m, &dev_priv->blt_ring);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		i915_ring_seqno_info(m, &dev_priv->ring[i]);
 	mutex_unlock(&dev->struct_mutex);

 	return 0;
@@ -503,13 +501,7 @@ static int i915_hws_info(struct seq_file *m, void *data)
 	volatile u32 *hws;
 	int i;

-	switch ((uintptr_t)node->info_ent->data) {
-	case RING_RENDER: ring = &dev_priv->render_ring; break;
-	case RING_BSD: ring = &dev_priv->bsd_ring; break;
-	case RING_BLT: ring = &dev_priv->blt_ring; break;
-	default: return -EINVAL;
-	}
-
+	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
 	hws = (volatile u32 *)ring->status_page.page_addr;
 	if (hws == NULL)
 		return 0;
@@ -569,17 +561,11 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
 	struct intel_ring_buffer *ring;
 	int ret;

-	switch ((uintptr_t)node->info_ent->data) {
-	case RING_RENDER: ring = &dev_priv->render_ring; break;
-	case RING_BSD: ring = &dev_priv->bsd_ring; break;
-	case RING_BLT: ring = &dev_priv->blt_ring; break;
-	default: return -EINVAL;
-	}
-
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;

+	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
 	if (!ring->obj) {
 		seq_printf(m, "No ringbuffer setup\n");
 	} else {
@@ -603,21 +589,20 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;

-	switch ((uintptr_t)node->info_ent->data) {
-	case RING_RENDER: ring = &dev_priv->render_ring; break;
-	case RING_BSD: ring = &dev_priv->bsd_ring; break;
-	case RING_BLT: ring = &dev_priv->blt_ring; break;
-	default: return -EINVAL;
-	}
-
+	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
 	if (ring->size == 0)
 		return 0;

 	seq_printf(m, "Ring %s:\n", ring->name);
 	seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
 	seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
 	seq_printf(m, " Size : %08x\n", ring->size);
 	seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
+	seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring));
+	if (IS_GEN6(dev)) {
+		seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring));
+		seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring));
+	}
 	seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
 	seq_printf(m, " Start : %08x\n", I915_READ_START(ring));

@@ -1177,15 +1162,15 @@ static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_gem_seqno", i915_gem_seqno_info, 0},
 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
 	{"i915_gem_interrupt", i915_interrupt_info, 0},
-	{"i915_gem_hws", i915_hws_info, 0, (void *)RING_RENDER},
-	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)RING_BLT},
-	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)RING_BSD},
-	{"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RING_RENDER},
-	{"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RING_RENDER},
-	{"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RING_BSD},
-	{"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RING_BSD},
-	{"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RING_BLT},
-	{"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RING_BLT},
+	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
+	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
+	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
+	{"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
+	{"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
+	{"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
+	{"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
+	{"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
+	{"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
 	{"i915_batchbuffers", i915_batchbuffer_info, 0},
 	{"i915_error_state", i915_error_state, 0},
 	{"i915_rstdby_delays", i915_rstdby_delays, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 9a22da9b2083..664300986fb4 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -49,6 +49,8 @@
 static int i915_init_phys_hws(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
 	/* Program Hardware Status Page */
 	dev_priv->status_page_dmah =
 		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
@@ -57,11 +59,10 @@ static int i915_init_phys_hws(struct drm_device *dev)
 		DRM_ERROR("Can not allocate hardware status page\n");
 		return -ENOMEM;
 	}
-	dev_priv->render_ring.status_page.page_addr
-		= dev_priv->status_page_dmah->vaddr;
+	ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
 	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

-	memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
+	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

 	if (INTEL_INFO(dev)->gen >= 4)
 		dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
@@ -79,13 +80,15 @@ static int i915_init_phys_hws(struct drm_device *dev)
 static void i915_free_hws(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
 	if (dev_priv->status_page_dmah) {
 		drm_pci_free(dev, dev_priv->status_page_dmah);
 		dev_priv->status_page_dmah = NULL;
 	}

-	if (dev_priv->render_ring.status_page.gfx_addr) {
-		dev_priv->render_ring.status_page.gfx_addr = 0;
+	if (ring->status_page.gfx_addr) {
+		ring->status_page.gfx_addr = 0;
 		drm_core_ioremapfree(&dev_priv->hws_map, dev);
 	}

@@ -97,7 +100,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
-	struct intel_ring_buffer *ring = &dev_priv->render_ring;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);

 	/*
 	 * We should never lose context on the ring with modesetting
@@ -123,6 +126,8 @@ void i915_kernel_lost_context(struct drm_device * dev)
 static int i915_dma_cleanup(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i;
+
 	/* Make sure interrupts are disabled here because the uninstall ioctl
 	 * may not have been called from userspace and after dev_private
 	 * is freed, it's too late.
@@ -131,9 +136,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
 		drm_irq_uninstall(dev);

 	mutex_lock(&dev->struct_mutex);
-	intel_cleanup_ring_buffer(&dev_priv->render_ring);
-	intel_cleanup_ring_buffer(&dev_priv->bsd_ring);
-	intel_cleanup_ring_buffer(&dev_priv->blt_ring);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
 	mutex_unlock(&dev->struct_mutex);

 	/* Clear the HWS virtual address at teardown */
@@ -147,6 +151,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);

 	master_priv->sarea = drm_getsarea(dev);
 	if (master_priv->sarea) {
@@ -157,24 +162,24 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 	}

 	if (init->ring_size != 0) {
-		if (dev_priv->render_ring.obj != NULL) {
+		if (ring->obj != NULL) {
 			i915_dma_cleanup(dev);
 			DRM_ERROR("Client tried to initialize ringbuffer in "
 				  "GEM mode\n");
 			return -EINVAL;
 		}

-		dev_priv->render_ring.size = init->ring_size;
+		ring->size = init->ring_size;

-		dev_priv->render_ring.map.offset = init->ring_start;
-		dev_priv->render_ring.map.size = init->ring_size;
-		dev_priv->render_ring.map.type = 0;
-		dev_priv->render_ring.map.flags = 0;
-		dev_priv->render_ring.map.mtrr = 0;
+		ring->map.offset = init->ring_start;
+		ring->map.size = init->ring_size;
+		ring->map.type = 0;
+		ring->map.flags = 0;
+		ring->map.mtrr = 0;

-		drm_core_ioremap_wc(&dev_priv->render_ring.map, dev);
+		drm_core_ioremap_wc(&ring->map, dev);

-		if (dev_priv->render_ring.map.handle == NULL) {
+		if (ring->map.handle == NULL) {
 			i915_dma_cleanup(dev);
 			DRM_ERROR("can not ioremap virtual address for"
 				  " ring buffer\n");
@@ -182,7 +187,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 		}
 	}

-	dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle;
+	ring->virtual_start = ring->map.handle;

 	dev_priv->cpp = init->cpp;
 	dev_priv->back_offset = init->back_offset;
@@ -201,12 +206,10 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 static int i915_dma_resume(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);

-	struct intel_ring_buffer *ring;
 	DRM_DEBUG_DRIVER("%s\n", __func__);

-	ring = &dev_priv->render_ring;
-
 	if (ring->map.handle == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
@@ -326,7 +329,7 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int i, ret;

-	if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
+	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
 		return -EINVAL;

 	for (i = 0; i < dwords;) {
@@ -565,13 +568,12 @@ static int i915_dispatch_flip(struct drm_device * dev)
 	return 0;
 }

-static int i915_quiescent(struct drm_device * dev)
+static int i915_quiescent(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = LP_RING(dev->dev_private);

 	i915_kernel_lost_context(dev);
-	return intel_wait_ring_buffer(&dev_priv->render_ring,
-				      dev_priv->render_ring.size - 8);
+	return intel_wait_ring_buffer(ring, ring->size - 8);
 }

 static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -828,7 +830,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_hws_addr_t *hws = data;
-	struct intel_ring_buffer *ring = &dev_priv->render_ring;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);

 	if (!I915_NEED_GFX_HWS(dev))
 		return -EINVAL;
@@ -1978,7 +1980,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (!IS_I945G(dev) && !IS_I945GM(dev))
 		pci_enable_msi(dev->pdev);

-	spin_lock_init(&dev_priv->user_irq_lock);
+	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->error_lock);
 	dev_priv->trace_irq_seqno = 0;

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 64844e2e9f86..413a040386a9 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -487,11 +487,11 @@ int i915_reset(struct drm_device *dev, u8 flags)
 			!dev_priv->mm.suspended) {
 		dev_priv->mm.suspended = 0;

-		dev_priv->render_ring.init(&dev_priv->render_ring);
+		dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
 		if (HAS_BSD(dev))
-			dev_priv->bsd_ring.init(&dev_priv->bsd_ring);
+			dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
 		if (HAS_BLT(dev))
-			dev_priv->blt_ring.init(&dev_priv->blt_ring);
+			dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);

 		mutex_unlock(&dev->struct_mutex);
 		drm_irq_uninstall(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index af9ff40b135b..8b19b5806230 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -269,9 +269,7 @@ typedef struct drm_i915_private {
 	} *gmbus;

 	struct pci_dev *bridge_dev;
-	struct intel_ring_buffer render_ring;
-	struct intel_ring_buffer bsd_ring;
-	struct intel_ring_buffer blt_ring;
+	struct intel_ring_buffer ring[I915_NUM_RINGS];
 	uint32_t next_seqno;

 	drm_dma_handle_t *status_page_dmah;
@@ -290,19 +288,15 @@ typedef struct drm_i915_private {
 	int page_flipping;

 	atomic_t irq_received;
-	/** Protects user_irq_refcount and irq_mask_reg */
-	spinlock_t user_irq_lock;
 	u32 trace_irq_seqno;
+
+	/* protects the irq masks */
+	spinlock_t irq_lock;
 	/** Cached value of IMR to avoid reads in updating the bitfield */
-	u32 irq_mask_reg;
 	u32 pipestat[2];
-	/** splitted irq regs for graphics and display engine on Ironlake,
-	    irq_mask_reg is still used for display irq. */
-	u32 gt_irq_mask_reg;
-	u32 gt_irq_enable_reg;
-	u32 de_irq_enable_reg;
-	u32 pch_irq_mask_reg;
-	u32 pch_irq_enable_reg;
+	u32 irq_mask;
+	u32 gt_irq_mask;
+	u32 pch_irq_mask;

 	u32 hotplug_supported_mask;
 	struct work_struct hotplug_work;
@@ -1104,7 +1098,8 @@ int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 						 bool interruptible);
 void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-				    struct intel_ring_buffer *ring);
+				    struct intel_ring_buffer *ring,
+				    u32 seqno);

 /**
  * Returns true if seq1 is later than seq2.
@@ -1272,6 +1267,17 @@ extern void intel_display_print_error_state(struct seq_file *m,
 					    struct intel_display_error_state *error);
 #endif

+#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
+
+#define BEGIN_LP_RING(n) \
+	intel_ring_begin(LP_RING(dev_priv), (n))
+
+#define OUT_RING(x) \
+	intel_ring_emit(LP_RING(dev_priv), x)
+
+#define ADVANCE_LP_RING() \
+	intel_ring_advance(LP_RING(dev_priv))
+
 /**
  * Lock test for when it's just for synchronization of ring access.
  *
@@ -1279,8 +1285,7 @@ extern void intel_display_print_error_state(struct seq_file *m,
  * has access to the ring.
  */
 #define RING_LOCK_TEST_WITH_RETURN(dev, file) do {		\
-	if (((drm_i915_private_t *)dev->dev_private)->render_ring.obj \
-	    == NULL)						\
+	if (LP_RING(dev->dev_private)->obj == NULL)		\
 		LOCK_TEST_WITH_RETURN(dev, file);		\
 } while (0)

@@ -1366,15 +1371,6 @@ i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
 	}
 }

-#define BEGIN_LP_RING(n) \
-	intel_ring_begin(&dev_priv->render_ring, (n))
-
-#define OUT_RING(x) \
-	intel_ring_emit(&dev_priv->render_ring, x)
-
-#define ADVANCE_LP_RING() \
-	intel_ring_advance(&dev_priv->render_ring)
-
 /**
  * Reads a dword out of the status page, which is written to from the command
  * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
@@ -1391,7 +1387,7 @@ i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
 #define READ_HWSP(dev_priv, reg) (((volatile u32 *)\
-			(dev_priv->render_ring.status_page.page_addr))[reg])
+			(LP_RING(dev_priv)->status_page.page_addr))[reg])
 #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
 #define I915_GEM_HWS_INDEX 0x20
 #define I915_BREADCRUMB_INDEX 0x21
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d99212fe54ed..eeed2e99d247 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1561,11 +1561,11 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)

 void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-			       struct intel_ring_buffer *ring)
+			       struct intel_ring_buffer *ring,
+			       u32 seqno)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t seqno = i915_gem_next_request_seqno(dev, ring);

 	BUG_ON(ring == NULL);
 	obj->ring = ring;
@@ -1679,7 +1679,8 @@ i915_gem_process_flushing_list(struct drm_device *dev,

 			obj->base.write_domain = 0;
 			list_del_init(&obj->gpu_write_list);
-			i915_gem_object_move_to_active(obj, ring);
+			i915_gem_object_move_to_active(obj, ring,
+						       i915_gem_next_request_seqno(dev, ring));

 			trace_i915_gem_object_change_domain(obj,
 							    obj->base.read_domains,
@@ -1804,10 +1805,10 @@ void i915_gem_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
+	int i;

-	i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
-	i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
-	i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);

 	/* Remove anything from the flushing lists. The GPU cache is likely
 	 * to be lost on reset along with the data, so simply move the
@@ -1846,6 +1847,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t seqno;
+	int i;

 	if (!ring->status_page.page_addr ||
 	    list_empty(&ring->request_list))
@@ -1854,6 +1856,11 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 	WARN_ON(i915_verify_lists(dev));

 	seqno = ring->get_seqno(ring);
+
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		if (seqno >= ring->sync_seqno[i])
+			ring->sync_seqno[i] = 0;
+
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;

@@ -1892,7 +1899,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,

 	if (unlikely (dev_priv->trace_irq_seqno &&
 		      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
-		ring->user_irq_put(ring);
+		ring->irq_put(ring);
 		dev_priv->trace_irq_seqno = 0;
 	}

@@ -1903,6 +1910,7 @@ void
 i915_gem_retire_requests(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i;

 	if (!list_empty(&dev_priv->mm.deferred_free_list)) {
 		struct drm_i915_gem_object *obj, *next;
@@ -1918,9 +1926,8 @@ i915_gem_retire_requests(struct drm_device *dev)
 			i915_gem_free_object_tail(obj);
 	}

-	i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
-	i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
-	i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		i915_gem_retire_requests_ring(dev, &dev_priv->ring[i]);
 }

 static void
@@ -1942,9 +1949,9 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	i915_gem_retire_requests(dev);

 	if (!dev_priv->mm.suspended &&
-	    (!list_empty(&dev_priv->render_ring.request_list) ||
-	     !list_empty(&dev_priv->bsd_ring.request_list) ||
-	     !list_empty(&dev_priv->blt_ring.request_list)))
+	    (!list_empty(&dev_priv->ring[RCS].request_list) ||
+	     !list_empty(&dev_priv->ring[VCS].request_list) ||
+	     !list_empty(&dev_priv->ring[BCS].request_list)))
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
 	mutex_unlock(&dev->struct_mutex);
 }
@@ -1993,7 +2000,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 		trace_i915_gem_request_wait_begin(dev, seqno);

 		ring->waiting_seqno = seqno;
-		ring->user_irq_get(ring);
+		ring->irq_get(ring);
 		if (interruptible)
 			ret = wait_event_interruptible(ring->irq_queue,
 				i915_seqno_passed(ring->get_seqno(ring), seqno)
@@ -2003,7 +2010,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 				i915_seqno_passed(ring->get_seqno(ring), seqno)
 				|| atomic_read(&dev_priv->mm.wedged));

-		ring->user_irq_put(ring);
+		ring->irq_put(ring);
 		ring->waiting_seqno = 0;

 		trace_i915_gem_request_wait_end(dev, seqno);
@@ -2159,7 +2166,7 @@ i915_gpu_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	bool lists_empty;
-	int ret;
+	int ret, i;

 	lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
 		       list_empty(&dev_priv->mm.active_list));
@@ -2167,17 +2174,11 @@ i915_gpu_idle(struct drm_device *dev)
 		return 0;

 	/* Flush everything onto the inactive list. */
-	ret = i915_ring_idle(dev, &dev_priv->render_ring);
-	if (ret)
-		return ret;
-
-	ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
-	if (ret)
-		return ret;
-
-	ret = i915_ring_idle(dev, &dev_priv->blt_ring);
-	if (ret)
-		return ret;
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		ret = i915_ring_idle(dev, &dev_priv->ring[i]);
+		if (ret)
+			return ret;
+	}

 	return 0;
 }
@@ -3153,11 +3154,11 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	 * generation is designed to be run atomically and so is
 	 * lockless.
 	 */
-	ring->user_irq_get(ring);
+	ring->irq_get(ring);
 	ret = wait_event_interruptible(ring->irq_queue,
 				       i915_seqno_passed(ring->get_seqno(ring), seqno)
 				       || atomic_read(&dev_priv->mm.wedged));
-	ring->user_irq_put(ring);
+	ring->irq_put(ring);

 	if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
 		ret = -EIO;
@@ -3584,9 +3585,9 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 	return 0;

 cleanup_bsd_ring:
-	intel_cleanup_ring_buffer(&dev_priv->bsd_ring);
+	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
 cleanup_render_ring:
-	intel_cleanup_ring_buffer(&dev_priv->render_ring);
+	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
 	return ret;
 }

@@ -3594,10 +3595,10 @@ void
 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i;

-	intel_cleanup_ring_buffer(&dev_priv->render_ring);
-	intel_cleanup_ring_buffer(&dev_priv->bsd_ring);
-	intel_cleanup_ring_buffer(&dev_priv->blt_ring);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
 }

 int
@@ -3605,7 +3606,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
+	int ret, i;

 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return 0;
@@ -3625,14 +3626,12 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 	}

 	BUG_ON(!list_empty(&dev_priv->mm.active_list));
-	BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
-	BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list));
-	BUG_ON(!list_empty(&dev_priv->blt_ring.active_list));
 	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
-	BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
-	BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list));
-	BUG_ON(!list_empty(&dev_priv->blt_ring.request_list));
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
+		BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
+	}
 	mutex_unlock(&dev->struct_mutex);

 	ret = drm_irq_install(dev);
@@ -3695,9 +3694,8 @@ i915_gem_load(struct drm_device *dev)
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 	INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
 	INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
-	init_ring_lists(&dev_priv->render_ring);
-	init_ring_lists(&dev_priv->bsd_ring);
-	init_ring_lists(&dev_priv->blt_ring);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		init_ring_lists(&dev_priv->ring[i]);
 	for (i = 0; i < 16; i++)
 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 9bdc495e17bb..6fc9cc485781 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -632,23 +632,59 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
 			  uint32_t flush_rings)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i;

 	if (flush_domains & I915_GEM_DOMAIN_CPU)
 		intel_gtt_chipset_flush();

 	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
-		if (flush_rings & RING_RENDER)
-			i915_gem_flush_ring(dev, &dev_priv->render_ring,
-					    invalidate_domains, flush_domains);
-		if (flush_rings & RING_BSD)
-			i915_gem_flush_ring(dev, &dev_priv->bsd_ring,
-					    invalidate_domains, flush_domains);
-		if (flush_rings & RING_BLT)
-			i915_gem_flush_ring(dev, &dev_priv->blt_ring,
-					    invalidate_domains, flush_domains);
+		for (i = 0; i < I915_NUM_RINGS; i++)
+			if (flush_rings & (1 << i))
+				i915_gem_flush_ring(dev, &dev_priv->ring[i],
+						    invalidate_domains,
+						    flush_domains);
 	}
 }

+static int
+i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
+			       struct intel_ring_buffer *to)
+{
+	struct intel_ring_buffer *from = obj->ring;
+	u32 seqno;
+	int ret, idx;
+
+	if (from == NULL || to == from)
+		return 0;
+
+	if (INTEL_INFO(obj->base.dev)->gen < 6)
+		return i915_gem_object_wait_rendering(obj, true);
+
+	idx = intel_ring_sync_index(from, to);
+
+	seqno = obj->last_rendering_seqno;
+	if (seqno <= from->sync_seqno[idx])
+		return 0;
+
+	if (seqno == from->outstanding_lazy_request) {
+		struct drm_i915_gem_request *request;
+
+		request = kzalloc(sizeof(*request), GFP_KERNEL);
+		if (request == NULL)
+			return -ENOMEM;
+
+		ret = i915_add_request(obj->base.dev, NULL, request, from);
+		if (ret) {
+			kfree(request);
+			return ret;
+		}
+
+		seqno = request->seqno;
+	}
+
+	from->sync_seqno[idx] = seqno;
+	return intel_ring_sync(to, from, seqno - 1);
+}

 static int
 i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
@@ -678,12 +714,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 	}

 	list_for_each_entry(obj, objects, exec_list) {
-		/* XXX replace with semaphores */
-		if (obj->ring && ring != obj->ring) {
-			ret = i915_gem_object_wait_rendering(obj, true);
-			if (ret)
-				return ret;
-		}
+		ret = i915_gem_execbuffer_sync_rings(obj, ring);
+		if (ret)
+			return ret;
 	}

 	return 0;
@@ -769,7 +802,8 @@ i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring,

 static void
 i915_gem_execbuffer_move_to_active(struct list_head *objects,
-				   struct intel_ring_buffer *ring)
+				   struct intel_ring_buffer *ring,
+				   u32 seqno)
 {
 	struct drm_i915_gem_object *obj;

@@ -778,7 +812,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
 		obj->base.write_domain = obj->base.pending_write_domain;
 		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

-		i915_gem_object_move_to_active(obj, ring);
+		i915_gem_object_move_to_active(obj, ring, seqno);
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
 			obj->pending_gpu_write = true;
@@ -833,6 +867,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct drm_clip_rect *cliprects = NULL;
 	struct intel_ring_buffer *ring;
 	u32 exec_start, exec_len;
+	u32 seqno;
 	int ret, i;

 	if (!i915_gem_check_execbuffer(args)) {
@@ -851,21 +886,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	switch (args->flags & I915_EXEC_RING_MASK) {
 	case I915_EXEC_DEFAULT:
 	case I915_EXEC_RENDER:
-		ring = &dev_priv->render_ring;
+		ring = &dev_priv->ring[RCS];
 		break;
 	case I915_EXEC_BSD:
 		if (!HAS_BSD(dev)) {
 			DRM_ERROR("execbuf with invalid ring (BSD)\n");
 			return -EINVAL;
 		}
-		ring = &dev_priv->bsd_ring;
+		ring = &dev_priv->ring[VCS];
 		break;
 	case I915_EXEC_BLT:
 		if (!HAS_BLT(dev)) {
 			DRM_ERROR("execbuf with invalid ring (BLT)\n");
 			return -EINVAL;
 		}
-		ring = &dev_priv->blt_ring;
+		ring = &dev_priv->ring[BCS];
 		break;
 	default:
 		DRM_ERROR("execbuf with unknown ring: %d\n",
@@ -879,7 +914,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	}

 	if (args->num_cliprects != 0) {
-		if (ring != &dev_priv->render_ring) {
+		if (ring != &dev_priv->ring[RCS]) {
 			DRM_ERROR("clip rectangles are only valid with the render ring\n");
 			return -EINVAL;
 		}
@@ -972,6 +1007,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (ret)
 		goto err;

+	seqno = i915_gem_next_request_seqno(dev, ring);
+	for (i = 0; i < I915_NUM_RINGS-1; i++) {
+		if (seqno < ring->sync_seqno[i]) {
+			/* The GPU can not handle its semaphore value wrapping,
+			 * so every billion or so execbuffers, we need to stall
+			 * the GPU in order to reset the counters.
+			 */
+			ret = i915_gpu_idle(dev);
+			if (ret)
+				goto err;
+
+			BUG_ON(ring->sync_seqno[i]);
+		}
+	}
+
 	exec_start = batch_obj->gtt_offset + args->batch_start_offset;
 	exec_len = args->batch_len;
 	if (cliprects) {
@@ -992,7 +1042,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto err;
 	}

-	i915_gem_execbuffer_move_to_active(&objects, ring);
+	i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
 	i915_gem_execbuffer_retire_commands(dev, file, ring);

 err:
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 9aa1e1dc5fd5..5e831b7eb3f1 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -67,9 +67,9 @@
 void
 ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
-	if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
-		dev_priv->gt_irq_mask_reg &= ~mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
+	if ((dev_priv->gt_irq_mask & mask) != 0) {
+		dev_priv->gt_irq_mask &= ~mask;
+		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 		POSTING_READ(GTIMR);
 	}
 }
@@ -77,9 +77,9 @@ ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
 void
 ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
-	if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
-		dev_priv->gt_irq_mask_reg |= mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
+	if ((dev_priv->gt_irq_mask & mask) != mask) {
+		dev_priv->gt_irq_mask |= mask;
+		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 		POSTING_READ(GTIMR);
 	}
 }
@@ -88,9 +88,9 @@ ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
 static void
 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
-	if ((dev_priv->irq_mask_reg & mask) != 0) {
-		dev_priv->irq_mask_reg &= ~mask;
-		I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
+	if ((dev_priv->irq_mask & mask) != 0) {
+		dev_priv->irq_mask &= ~mask;
+		I915_WRITE(DEIMR, dev_priv->irq_mask);
 		POSTING_READ(DEIMR);
 	}
 }
@@ -98,9 +98,9 @@ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 static inline void
 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
-	if ((dev_priv->irq_mask_reg & mask) != mask) {
-		dev_priv->irq_mask_reg |= mask;
-		I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
+	if ((dev_priv->irq_mask & mask) != mask) {
+		dev_priv->irq_mask |= mask;
+		I915_WRITE(DEIMR, dev_priv->irq_mask);
 		POSTING_READ(DEIMR);
 	}
 }
@@ -108,9 +108,9 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 void
 i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
-	if ((dev_priv->irq_mask_reg & mask) != 0) {
-		dev_priv->irq_mask_reg &= ~mask;
-		I915_WRITE(IMR, dev_priv->irq_mask_reg);
+	if ((dev_priv->irq_mask & mask) != 0) {
+		dev_priv->irq_mask &= ~mask;
+		I915_WRITE(IMR, dev_priv->irq_mask);
 		POSTING_READ(IMR);
 	}
 }
@@ -118,9 +118,9 @@ i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
 void
 i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
-	if ((dev_priv->irq_mask_reg & mask) != mask) {
-		dev_priv->irq_mask_reg |= mask;
-		I915_WRITE(IMR, dev_priv->irq_mask_reg);
+	if ((dev_priv->irq_mask & mask) != mask) {
+		dev_priv->irq_mask |= mask;
+		I915_WRITE(IMR, dev_priv->irq_mask);
 		POSTING_READ(IMR);
 	}
 }
@@ -163,9 +163,12 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
 /**
  * intel_enable_asle - enable ASLE interrupt for OpRegion
  */
-void intel_enable_asle (struct drm_device *dev)
+void intel_enable_asle(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

 	if (HAS_PCH_SPLIT(dev))
 		ironlake_enable_display_irq(dev_priv, DE_GSE);
@@ -176,6 +179,8 @@ void intel_enable_asle (struct drm_device *dev)
 		i915_enable_pipestat(dev_priv, 0,
 				     PIPE_LEGACY_BLC_EVENT_ENABLE);
 	}
+
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }

 /**
@@ -344,12 +349,12 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 			READ_BREADCRUMB(dev_priv);
 	}

-	if (gt_iir & GT_PIPE_NOTIFY)
-		notify_ring(dev, &dev_priv->render_ring);
+	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
+		notify_ring(dev, &dev_priv->ring[RCS]);
 	if (gt_iir & bsd_usr_interrupt)
-		notify_ring(dev, &dev_priv->bsd_ring);
-	if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT)
-		notify_ring(dev, &dev_priv->blt_ring);
+		notify_ring(dev, &dev_priv->ring[VCS]);
+	if (gt_iir & GT_BLT_USER_INTERRUPT)
+		notify_ring(dev, &dev_priv->ring[BCS]);

 	if (de_iir & DE_GSE)
 		intel_opregion_gse_intr(dev);
@@ -640,8 +645,7 @@ static void i915_capture_error_state(struct drm_device *dev)

 	DRM_DEBUG_DRIVER("generating error event\n");

-	error->seqno =
-		dev_priv->render_ring.get_seqno(&dev_priv->render_ring);
+	error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]);
 	error->eir = I915_READ(EIR);
 	error->pgtbl_er = I915_READ(PGTBL_ER);
 	error->pipeastat = I915_READ(PIPEASTAT);
@@ -656,16 +660,16 @@ static void i915_capture_error_state(struct drm_device *dev)
 		error->bcs_ipeir = I915_READ(BCS_IPEIR);
 		error->bcs_instdone = I915_READ(BCS_INSTDONE);
 		error->bcs_seqno = 0;
-		if (dev_priv->blt_ring.get_seqno)
-			error->bcs_seqno = dev_priv->blt_ring.get_seqno(&dev_priv->blt_ring);
+		if (dev_priv->ring[BCS].get_seqno)
+			error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]);

 		error->vcs_acthd = I915_READ(VCS_ACTHD);
 		error->vcs_ipehr = I915_READ(VCS_IPEHR);
 		error->vcs_ipeir = I915_READ(VCS_IPEIR);
 		error->vcs_instdone = I915_READ(VCS_INSTDONE);
 		error->vcs_seqno = 0;
-		if (dev_priv->bsd_ring.get_seqno)
-			error->vcs_seqno = dev_priv->bsd_ring.get_seqno(&dev_priv->bsd_ring);
+		if (dev_priv->ring[VCS].get_seqno)
+			error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]);
 	}
 	if (INTEL_INFO(dev)->gen >= 4) {
 		error->ipeir = I915_READ(IPEIR_I965);
@@ -684,7 +688,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 	}
 	i915_gem_record_fences(dev, error);

-	bbaddr = i915_ringbuffer_last_batch(dev, &dev_priv->render_ring);
+	bbaddr = i915_ringbuffer_last_batch(dev, &dev_priv->ring[RCS]);

 	/* Grab the current batchbuffer, most likely to have crashed. */
 	batchbuffer[0] = NULL;
@@ -748,7 +752,7 @@ static void i915_capture_error_state(struct drm_device *dev)

 	/* Record the ringbuffer */
 	error->ringbuffer = i915_error_object_create(dev,
-						     dev_priv->render_ring.obj);
+						     dev_priv->ring[RCS].obj);

 	/* Record buffers on the active and pinned lists. */
 	error->active_bo = NULL;
@@ -949,11 +953,11 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
 		/*
 		 * Wakeup waiting processes so they don't hang
 		 */
-		wake_up_all(&dev_priv->render_ring.irq_queue);
+		wake_up_all(&dev_priv->ring[RCS].irq_queue);
 		if (HAS_BSD(dev))
-			wake_up_all(&dev_priv->bsd_ring.irq_queue);
+			wake_up_all(&dev_priv->ring[VCS].irq_queue);
 		if (HAS_BLT(dev))
-			wake_up_all(&dev_priv->blt_ring.irq_queue);
+			wake_up_all(&dev_priv->ring[BCS].irq_queue);
 	}

 	queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -1035,7 +1039,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 		 * It doesn't set the bit in iir again, but it still produces
 		 * interrupts (for non-MSI).
 		 */
-		spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 		pipea_stats = I915_READ(PIPEASTAT);
 		pipeb_stats = I915_READ(PIPEBSTAT);

@@ -1058,7 +1062,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 			I915_WRITE(PIPEBSTAT, pipeb_stats);
 			irq_received = 1;
 		}
-		spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

 		if (!irq_received)
 			break;
@@ -1091,9 +1095,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 		}

 		if (iir & I915_USER_INTERRUPT)
-			notify_ring(dev, &dev_priv->render_ring);
-		if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
-			notify_ring(dev, &dev_priv->bsd_ring);
+			notify_ring(dev, &dev_priv->ring[RCS]);
+		if (iir & I915_BSD_USER_INTERRUPT)
+			notify_ring(dev, &dev_priv->ring[VCS]);

 		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
 			intel_prepare_page_flip(dev, 0);
@@ -1180,10 +1184,10 @@ static int i915_emit_irq(struct drm_device * dev)
 void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);

 	if (dev_priv->trace_irq_seqno == 0)
-		render_ring->user_irq_get(render_ring);
+		ring->irq_get(ring);

 	dev_priv->trace_irq_seqno = seqno;
 }
@@ -1193,7 +1197,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 	int ret = 0;
-	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);

 	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
 		  READ_BREADCRUMB(dev_priv));
@@ -1207,10 +1211,10 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 	if (master_priv->sarea_priv)
 		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

-	render_ring->user_irq_get(render_ring);
-	DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ,
+	ring->irq_get(ring);
+	DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
 		    READ_BREADCRUMB(dev_priv) >= irq_nr);
-	render_ring->user_irq_put(render_ring);
+	ring->irq_put(ring);

 	if (ret == -EBUSY) {
 		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
@@ -1229,7 +1233,7 @@ int i915_irq_emit(struct drm_device *dev, void *data,
 	drm_i915_irq_emit_t *emit = data;
 	int result;

-	if (!dev_priv || !dev_priv->render_ring.virtual_start) {
+	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
 		DRM_ERROR("called with no initialization\n");
 		return -EINVAL;
 	}
@@ -1275,9 +1279,9 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
 	if (!i915_pipe_enabled(dev, pipe))
 		return -EINVAL;

-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	if (HAS_PCH_SPLIT(dev))
 		ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
 					    DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
1282 else if (INTEL_INFO(dev)->gen >= 4) 1286 else if (INTEL_INFO(dev)->gen >= 4)
1283 i915_enable_pipestat(dev_priv, pipe, 1287 i915_enable_pipestat(dev_priv, pipe,
@@ -1285,7 +1289,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
1285 else 1289 else
1286 i915_enable_pipestat(dev_priv, pipe, 1290 i915_enable_pipestat(dev_priv, pipe,
1287 PIPE_VBLANK_INTERRUPT_ENABLE); 1291 PIPE_VBLANK_INTERRUPT_ENABLE);
1288 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 1292 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1289 return 0; 1293 return 0;
1290} 1294}
1291 1295
@@ -1297,15 +1301,15 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
1297 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1301 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1298 unsigned long irqflags; 1302 unsigned long irqflags;
1299 1303
1300 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1304 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1301 if (HAS_PCH_SPLIT(dev)) 1305 if (HAS_PCH_SPLIT(dev))
1302 ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 1306 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1303 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); 1307 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
1304 else 1308 else
1305 i915_disable_pipestat(dev_priv, pipe, 1309 i915_disable_pipestat(dev_priv, pipe,
1306 PIPE_VBLANK_INTERRUPT_ENABLE | 1310 PIPE_VBLANK_INTERRUPT_ENABLE |
1307 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1311 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1308 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 1312 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1309} 1313}
1310 1314
1311void i915_enable_interrupt (struct drm_device *dev) 1315void i915_enable_interrupt (struct drm_device *dev)
@@ -1397,6 +1401,27 @@ static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
1397 return false; 1401 return false;
1398} 1402}
1399 1403
1404static bool kick_ring(struct intel_ring_buffer *ring)
1405{
1406 struct drm_device *dev = ring->dev;
1407 struct drm_i915_private *dev_priv = dev->dev_private;
1408 u32 tmp = I915_READ_CTL(ring);
1409 if (tmp & RING_WAIT) {
1410 DRM_ERROR("Kicking stuck wait on %s\n",
1411 ring->name);
1412 I915_WRITE_CTL(ring, tmp);
1413 return true;
1414 }
1415 if (IS_GEN6(dev) &&
1416 (tmp & RING_WAIT_SEMAPHORE)) {
1417 DRM_ERROR("Kicking stuck semaphore on %s\n",
1418 ring->name);
1419 I915_WRITE_CTL(ring, tmp);
1420 return true;
1421 }
1422 return false;
1423}
1424
1400/** 1425/**
1401 * This is called when the chip hasn't reported back with completed 1426 * This is called when the chip hasn't reported back with completed
1402 * batchbuffers in a long time. The first time this is called we simply record 1427 * batchbuffers in a long time. The first time this is called we simply record
@@ -1411,9 +1436,9 @@ void i915_hangcheck_elapsed(unsigned long data)
1411 bool err = false; 1436 bool err = false;
1412 1437
1413 /* If all work is done then ACTHD clearly hasn't advanced. */ 1438 /* If all work is done then ACTHD clearly hasn't advanced. */
1414 if (i915_hangcheck_ring_idle(&dev_priv->render_ring, &err) && 1439 if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
1415 i915_hangcheck_ring_idle(&dev_priv->bsd_ring, &err) && 1440 i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
1416 i915_hangcheck_ring_idle(&dev_priv->blt_ring, &err)) { 1441 i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
1417 dev_priv->hangcheck_count = 0; 1442 dev_priv->hangcheck_count = 0;
1418 if (err) 1443 if (err)
1419 goto repeat; 1444 goto repeat;
@@ -1442,12 +1467,17 @@ void i915_hangcheck_elapsed(unsigned long data)
1442 * and break the hang. This should work on 1467 * and break the hang. This should work on
1443 * all but the second generation chipsets. 1468 * all but the second generation chipsets.
1444 */ 1469 */
1445 struct intel_ring_buffer *ring = &dev_priv->render_ring; 1470
1446 u32 tmp = I915_READ_CTL(ring); 1471 if (kick_ring(&dev_priv->ring[RCS]))
1447 if (tmp & RING_WAIT) { 1472 goto repeat;
1448 I915_WRITE_CTL(ring, tmp); 1473
1474 if (HAS_BSD(dev) &&
1475 kick_ring(&dev_priv->ring[VCS]))
1476 goto repeat;
1477
1478 if (HAS_BLT(dev) &&
1479 kick_ring(&dev_priv->ring[BCS]))
1449 goto repeat; 1480 goto repeat;
1450 }
1451 } 1481 }
1452 1482
1453 i915_handle_error(dev, true); 1483 i915_handle_error(dev, true);
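
The kick_ring() helper added above generalises the old render-ring-only unwedge path to any ring: if the ring's control register shows it stalled on MI_WAIT_FOR_EVENT (RING_WAIT) or, on Gen6, on a semaphore mailbox wait (RING_WAIT_SEMAPHORE), writing the value straight back pokes the hardware out of the wait and hangcheck gives it another interval. A minimal user-space sketch of that decision, with the register access reduced to a plain variable and the bit positions taken from the RING_CTL definitions added to i915_reg.h below:

#include <stdbool.h>
#include <stdio.h>

#define RING_WAIT            (1u << 11)  /* gen3+: stalled on MI_WAIT_FOR_EVENT */
#define RING_WAIT_SEMAPHORE  (1u << 10)  /* gen6+: stalled on MI_SEMAPHORE_MBOX */

/* Returns true if the "ring" looked stuck and was kicked. */
static bool kick_if_stuck(unsigned int *ctl, bool is_gen6, const char *name)
{
        unsigned int tmp = *ctl;            /* stands in for I915_READ_CTL(ring) */

        if (tmp & RING_WAIT) {
                fprintf(stderr, "Kicking stuck wait on %s\n", name);
                *ctl = tmp;                 /* writing CTL back breaks the wait */
                return true;
        }
        if (is_gen6 && (tmp & RING_WAIT_SEMAPHORE)) {
                fprintf(stderr, "Kicking stuck semaphore on %s\n", name);
                *ctl = tmp;
                return true;
        }
        return false;
}

int main(void)
{
        unsigned int ctl = RING_WAIT_SEMAPHORE;
        printf("kicked: %d\n", kick_if_stuck(&ctl, true, "bsd ring"));
        return 0;
}
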
@@ -1498,37 +1528,37 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1498 /* enable kind of interrupts always enabled */ 1528 /* enable kind of interrupts always enabled */
1499 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 1529 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1500 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; 1530 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
1501 u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT; 1531 u32 render_irqs;
1502 u32 hotplug_mask; 1532 u32 hotplug_mask;
1503 1533
1504 dev_priv->irq_mask_reg = ~display_mask; 1534 dev_priv->irq_mask = ~display_mask;
1505 dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
1506 1535
1507 /* should always can generate irq */ 1536 /* should always can generate irq */
1508 I915_WRITE(DEIIR, I915_READ(DEIIR)); 1537 I915_WRITE(DEIIR, I915_READ(DEIIR));
1509 I915_WRITE(DEIMR, dev_priv->irq_mask_reg); 1538 I915_WRITE(DEIMR, dev_priv->irq_mask);
1510 I915_WRITE(DEIER, dev_priv->de_irq_enable_reg); 1539 I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
1511 POSTING_READ(DEIER); 1540 POSTING_READ(DEIER);
1512 1541
1513 if (IS_GEN6(dev)) { 1542 dev_priv->gt_irq_mask = ~0;
1514 render_mask =
1515 GT_PIPE_NOTIFY |
1516 GT_GEN6_BSD_USER_INTERRUPT |
1517 GT_BLT_USER_INTERRUPT;
1518 }
1519
1520 dev_priv->gt_irq_mask_reg = ~render_mask;
1521 dev_priv->gt_irq_enable_reg = render_mask;
1522 1543
1523 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1544 I915_WRITE(GTIIR, I915_READ(GTIIR));
1524 I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); 1545 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1525 if (IS_GEN6(dev)) { 1546 if (IS_GEN6(dev)) {
1526 I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT); 1547 I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_USER_INTERRUPT);
1527 I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT); 1548 I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_USER_INTERRUPT);
1528 I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT); 1549 I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT);
1529 } 1550 }
1530 1551
1531 I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); 1552 if (IS_GEN6(dev))
1553 render_irqs =
1554 GT_USER_INTERRUPT |
1555 GT_GEN6_BSD_USER_INTERRUPT |
1556 GT_BLT_USER_INTERRUPT;
1557 else
1558 render_irqs =
1559 GT_PIPE_NOTIFY |
1560 GT_BSD_USER_INTERRUPT;
1561 I915_WRITE(GTIER, render_irqs);
1532 POSTING_READ(GTIER); 1562 POSTING_READ(GTIER);
1533 1563
1534 if (HAS_PCH_CPT(dev)) { 1564 if (HAS_PCH_CPT(dev)) {
@@ -1539,12 +1569,11 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1539 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; 1569 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
1540 } 1570 }
1541 1571
1542 dev_priv->pch_irq_mask_reg = ~hotplug_mask; 1572 dev_priv->pch_irq_mask = ~hotplug_mask;
1543 dev_priv->pch_irq_enable_reg = hotplug_mask;
1544 1573
1545 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 1574 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1546 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg); 1575 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1547 I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg); 1576 I915_WRITE(SDEIER, hotplug_mask);
1548 POSTING_READ(SDEIER); 1577 POSTING_READ(SDEIER);
1549 1578
1550 if (IS_IRONLAKE_M(dev)) { 1579 if (IS_IRONLAKE_M(dev)) {
@@ -1594,11 +1623,11 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1594 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; 1623 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
1595 u32 error_mask; 1624 u32 error_mask;
1596 1625
1597 DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue); 1626 DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
1598 if (HAS_BSD(dev)) 1627 if (HAS_BSD(dev))
1599 DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue); 1628 DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
1600 if (HAS_BLT(dev)) 1629 if (HAS_BLT(dev))
1601 DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue); 1630 DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
1602 1631
1603 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 1632 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1604 1633
@@ -1606,7 +1635,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1606 return ironlake_irq_postinstall(dev); 1635 return ironlake_irq_postinstall(dev);
1607 1636
1608 /* Unmask the interrupts that we always want on. */ 1637 /* Unmask the interrupts that we always want on. */
1609 dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX; 1638 dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;
1610 1639
1611 dev_priv->pipestat[0] = 0; 1640 dev_priv->pipestat[0] = 0;
1612 dev_priv->pipestat[1] = 0; 1641 dev_priv->pipestat[1] = 0;
@@ -1615,7 +1644,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1615 /* Enable in IER... */ 1644 /* Enable in IER... */
1616 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 1645 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
1617 /* and unmask in IMR */ 1646 /* and unmask in IMR */
1618 dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT; 1647 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
1619 } 1648 }
1620 1649
1621 /* 1650 /*
@@ -1633,7 +1662,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1633 } 1662 }
1634 I915_WRITE(EMR, error_mask); 1663 I915_WRITE(EMR, error_mask);
1635 1664
1636 I915_WRITE(IMR, dev_priv->irq_mask_reg); 1665 I915_WRITE(IMR, dev_priv->irq_mask);
1637 I915_WRITE(IER, enable_mask); 1666 I915_WRITE(IER, enable_mask);
1638 POSTING_READ(IER); 1667 POSTING_READ(IER);
1639 1668
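
Throughout the call sites above, the named ring members give way to the dev_priv->ring[] array indexed by the RCS/VCS/BCS ids introduced in intel_ringbuffer.h below, and the legacy paths now reach the render ring through LP_RING(). The macro itself lives in a header not shown in this section, so the following is only a plausible sketch of the pattern (the LP_RING definition here is an assumption):

#include <stdio.h>

enum { RCS = 0, VCS, BCS, I915_NUM_RINGS };       /* ring ids, as added below */

struct intel_ring_buffer { const char *name; };

struct drm_i915_private {
        struct intel_ring_buffer ring[I915_NUM_RINGS];
};

/* Assumed shape of the helper: the legacy paths use the render ring. */
#define LP_RING(d) (&(d)->ring[RCS])

int main(void)
{
        struct drm_i915_private dev_priv = {
                .ring = { [RCS] = { "render ring" },
                          [VCS] = { "bsd ring" },
                          [BCS] = { "blt ring" } },
        };

        printf("LP_RING is the %s\n", LP_RING(&dev_priv)->name);
        return 0;
}
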
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 06175e98c5bb..3e03094cf148 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -176,6 +176,11 @@
176#define MI_BATCH_NON_SECURE (1) 176#define MI_BATCH_NON_SECURE (1)
177#define MI_BATCH_NON_SECURE_I965 (1<<8) 177#define MI_BATCH_NON_SECURE_I965 (1<<8)
178#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) 178#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
179#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
180#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
181#define MI_SEMAPHORE_UPDATE (1<<21)
182#define MI_SEMAPHORE_COMPARE (1<<20)
183#define MI_SEMAPHORE_REGISTER (1<<18)
179/* 184/*
180 * 3D instructions used by the kernel 185 * 3D instructions used by the kernel
181 */ 186 */
@@ -276,9 +281,12 @@
276#define RING_HEAD(base) ((base)+0x34) 281#define RING_HEAD(base) ((base)+0x34)
277#define RING_START(base) ((base)+0x38) 282#define RING_START(base) ((base)+0x38)
278#define RING_CTL(base) ((base)+0x3c) 283#define RING_CTL(base) ((base)+0x3c)
284#define RING_SYNC_0(base) ((base)+0x40)
285#define RING_SYNC_1(base) ((base)+0x44)
279#define RING_HWS_PGA(base) ((base)+0x80) 286#define RING_HWS_PGA(base) ((base)+0x80)
280#define RING_HWS_PGA_GEN6(base) ((base)+0x2080) 287#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
281#define RING_ACTHD(base) ((base)+0x74) 288#define RING_ACTHD(base) ((base)+0x74)
289#define RING_NOPID(base) ((base)+0x94)
282#define TAIL_ADDR 0x001FFFF8 290#define TAIL_ADDR 0x001FFFF8
283#define HEAD_WRAP_COUNT 0xFFE00000 291#define HEAD_WRAP_COUNT 0xFFE00000
284#define HEAD_WRAP_ONE 0x00200000 292#define HEAD_WRAP_ONE 0x00200000
@@ -293,6 +301,7 @@
293#define RING_INVALID 0x00000000 301#define RING_INVALID 0x00000000
294#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */ 302#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */
295#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */ 303#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
304#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */
296#if 0 305#if 0
297#define PRB0_TAIL 0x02030 306#define PRB0_TAIL 0x02030
298#define PRB0_HEAD 0x02034 307#define PRB0_HEAD 0x02034
@@ -347,6 +356,14 @@
347# define VS_TIMER_DISPATCH (1 << 6) 356# define VS_TIMER_DISPATCH (1 << 6)
348# define MI_FLUSH_ENABLE (1 << 11) 357# define MI_FLUSH_ENABLE (1 << 11)
349 358
359#define GFX_MODE 0x02520
360#define GFX_RUN_LIST_ENABLE (1<<15)
361#define GFX_TLB_INVALIDATE_ALWAYS (1<<13)
362#define GFX_SURFACE_FAULT_ENABLE (1<<12)
363#define GFX_REPLAY_MODE (1<<11)
364#define GFX_PSMI_GRANULARITY (1<<10)
365#define GFX_PPGTT_ENABLE (1<<9)
366
350#define SCPD0 0x0209c /* 915+ only */ 367#define SCPD0 0x0209c /* 915+ only */
351#define IER 0x020a0 368#define IER 0x020a0
352#define IIR 0x020a4 369#define IIR 0x020a4
@@ -498,7 +515,7 @@
498#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3) 515#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3)
499 516
500#define GEN6_BSD_IMR 0x120a8 517#define GEN6_BSD_IMR 0x120a8
501#define GEN6_BSD_IMR_USER_INTERRUPT (1 << 12) 518#define GEN6_BSD_USER_INTERRUPT (1 << 12)
502 519
503#define GEN6_BSD_RNCID 0x12198 520#define GEN6_BSD_RNCID 0x12198
504 521
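
The new register definitions give every ring a pair of semaphore mailboxes at +0x40 and +0x44 from its MMIO base (RING_SYNC_0/RING_SYNC_1): a signalling ring writes its latest seqno into the other rings' mailboxes with MI_SEMAPHORE_MBOX | MI_SEMAPHORE_UPDATE, and a waiting ring stalls on one of its own mailboxes with MI_SEMAPHORE_COMPARE, as wired up in intel_ringbuffer.c below. A small sketch of the address arithmetic; the MMIO base values here are illustrative only, the real ones come from the ring definitions:

#include <stdio.h>

#define RING_SYNC_0(base) ((base) + 0x40)
#define RING_SYNC_1(base) ((base) + 0x44)

int main(void)
{
        /* Illustrative bases only; the real values come from the ring definitions. */
        static const struct { const char *name; unsigned int mmio_base; } rings[] = {
                { "render", 0x02000 }, { "bsd", 0x12000 }, { "blt", 0x22000 },
        };

        for (unsigned int i = 0; i < sizeof(rings) / sizeof(rings[0]); i++)
                printf("%-6s sync0=%#07x sync1=%#07x\n", rings[i].name,
                       RING_SYNC_0(rings[i].mmio_base),
                       RING_SYNC_1(rings[i].mmio_base));
        return 0;
}
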
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3063edd2456f..0b6272a2edfc 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1998,7 +1998,7 @@ static void intel_clear_scanline_wait(struct drm_device *dev)
1998 /* Can't break the hang on i8xx */ 1998 /* Can't break the hang on i8xx */
1999 return; 1999 return;
2000 2000
2001 ring = &dev_priv->render_ring; 2001 ring = LP_RING(dev_priv);
2002 tmp = I915_READ_CTL(ring); 2002 tmp = I915_READ_CTL(ring);
2003 if (tmp & RING_WAIT) 2003 if (tmp & RING_WAIT)
2004 I915_WRITE_CTL(ring, tmp); 2004 I915_WRITE_CTL(ring, tmp);
@@ -5124,7 +5124,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
5124 obj = intel_fb->obj; 5124 obj = intel_fb->obj;
5125 5125
5126 mutex_lock(&dev->struct_mutex); 5126 mutex_lock(&dev->struct_mutex);
5127 ret = intel_pin_and_fence_fb_obj(dev, obj, &dev_priv->render_ring); 5127 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
5128 if (ret) 5128 if (ret)
5129 goto cleanup_work; 5129 goto cleanup_work;
5130 5130
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 9b0d9a867aea..f295a7aaadf9 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -273,14 +273,8 @@ void intel_opregion_enable_asle(struct drm_device *dev)
273 struct opregion_asle *asle = dev_priv->opregion.asle; 273 struct opregion_asle *asle = dev_priv->opregion.asle;
274 274
275 if (asle) { 275 if (asle) {
276 if (IS_MOBILE(dev)) { 276 if (IS_MOBILE(dev))
277 unsigned long irqflags;
278
279 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
280 intel_enable_asle(dev); 277 intel_enable_asle(dev);
281 spin_unlock_irqrestore(&dev_priv->user_irq_lock,
282 irqflags);
283 }
284 278
285 asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | 279 asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
286 ASLE_PFMB_EN; 280 ASLE_PFMB_EN;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index d0c1add393a3..3fbb98b948d6 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -221,7 +221,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
221 int ret; 221 int ret;
222 222
223 BUG_ON(overlay->last_flip_req); 223 BUG_ON(overlay->last_flip_req);
224 ret = i915_add_request(dev, NULL, request, &dev_priv->render_ring); 224 ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
225 if (ret) { 225 if (ret) {
226 kfree(request); 226 kfree(request);
227 return ret; 227 return ret;
@@ -230,7 +230,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
230 overlay->flip_tail = tail; 230 overlay->flip_tail = tail;
231 ret = i915_do_wait_request(dev, 231 ret = i915_do_wait_request(dev,
232 overlay->last_flip_req, true, 232 overlay->last_flip_req, true,
233 &dev_priv->render_ring); 233 LP_RING(dev_priv));
234 if (ret) 234 if (ret)
235 return ret; 235 return ret;
236 236
@@ -364,7 +364,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
364 OUT_RING(flip_addr); 364 OUT_RING(flip_addr);
365 ADVANCE_LP_RING(); 365 ADVANCE_LP_RING();
366 366
367 ret = i915_add_request(dev, NULL, request, &dev_priv->render_ring); 367 ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
368 if (ret) { 368 if (ret) {
369 kfree(request); 369 kfree(request);
370 return ret; 370 return ret;
@@ -454,7 +454,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
454 return 0; 454 return 0;
455 455
456 ret = i915_do_wait_request(dev, overlay->last_flip_req, 456 ret = i915_do_wait_request(dev, overlay->last_flip_req,
457 interruptible, &dev_priv->render_ring); 457 interruptible, LP_RING(dev_priv));
458 if (ret) 458 if (ret)
459 return ret; 459 return ret;
460 460
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 21871b0766e2..f71db0cf4909 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -203,6 +203,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
203 if (ring->space < 0) 203 if (ring->space < 0)
204 ring->space += ring->size; 204 ring->space += ring->size;
205 } 205 }
206
206 return 0; 207 return 0;
207} 208}
208 209
@@ -281,17 +282,18 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
281static int init_render_ring(struct intel_ring_buffer *ring) 282static int init_render_ring(struct intel_ring_buffer *ring)
282{ 283{
283 struct drm_device *dev = ring->dev; 284 struct drm_device *dev = ring->dev;
285 struct drm_i915_private *dev_priv = dev->dev_private;
284 int ret = init_ring_common(ring); 286 int ret = init_ring_common(ring);
285 287
286 if (INTEL_INFO(dev)->gen > 3) { 288 if (INTEL_INFO(dev)->gen > 3) {
287 drm_i915_private_t *dev_priv = dev->dev_private;
288 int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH; 289 int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
289 if (IS_GEN6(dev)) 290 if (IS_GEN6(dev))
290 mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; 291 mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
291 I915_WRITE(MI_MODE, mode); 292 I915_WRITE(MI_MODE, mode);
292 } 293 }
293 294
294 if (HAS_PIPE_CONTROL(dev)) { 295 if (INTEL_INFO(dev)->gen >= 6) {
296 } else if (HAS_PIPE_CONTROL(dev)) {
295 ret = init_pipe_control(ring); 297 ret = init_pipe_control(ring);
296 if (ret) 298 if (ret)
297 return ret; 299 return ret;
@@ -308,6 +310,80 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
308 cleanup_pipe_control(ring); 310 cleanup_pipe_control(ring);
309} 311}
310 312
313static void
314update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
315{
316 struct drm_device *dev = ring->dev;
317 struct drm_i915_private *dev_priv = dev->dev_private;
318 int id;
319
320 /*
321 * cs -> 1 = vcs, 0 = bcs
322 * vcs -> 1 = bcs, 0 = cs,
323 * bcs -> 1 = cs, 0 = vcs.
324 */
325 id = ring - dev_priv->ring;
326 id += 2 - i;
327 id %= 3;
328
329 intel_ring_emit(ring,
330 MI_SEMAPHORE_MBOX |
331 MI_SEMAPHORE_REGISTER |
332 MI_SEMAPHORE_UPDATE);
333 intel_ring_emit(ring, seqno);
334 intel_ring_emit(ring,
335 RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
336}
337
338static int
339gen6_add_request(struct intel_ring_buffer *ring,
340 u32 *result)
341{
342 u32 seqno;
343 int ret;
344
345 ret = intel_ring_begin(ring, 10);
346 if (ret)
347 return ret;
348
349 seqno = i915_gem_get_seqno(ring->dev);
350 update_semaphore(ring, 0, seqno);
351 update_semaphore(ring, 1, seqno);
352
353 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
354 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
355 intel_ring_emit(ring, seqno);
356 intel_ring_emit(ring, MI_USER_INTERRUPT);
357 intel_ring_advance(ring);
358
359 *result = seqno;
360 return 0;
361}
362
363int
364intel_ring_sync(struct intel_ring_buffer *ring,
365 struct intel_ring_buffer *to,
366 u32 seqno)
367{
368 int ret;
369
370 ret = intel_ring_begin(ring, 4);
371 if (ret)
372 return ret;
373
374 intel_ring_emit(ring,
375 MI_SEMAPHORE_MBOX |
376 MI_SEMAPHORE_REGISTER |
377 intel_ring_sync_index(ring, to) << 17 |
378 MI_SEMAPHORE_COMPARE);
379 intel_ring_emit(ring, seqno);
380 intel_ring_emit(ring, 0);
381 intel_ring_emit(ring, MI_NOOP);
382 intel_ring_advance(ring);
383
384 return 0;
385}
386
311#define PIPE_CONTROL_FLUSH(ring__, addr__) \ 387#define PIPE_CONTROL_FLUSH(ring__, addr__) \
312do { \ 388do { \
313 intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ 389 intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
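
gen6_add_request() above signals both other rings: update_semaphore() picks the target ring with id = (ring + 2 - i) % 3 and writes the new seqno into slot i of that ring's mailbox pair, before the usual MI_STORE_DWORD_INDEX / MI_USER_INTERRUPT. intel_ring_sync() is the consumer side: it emits MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE with the slot chosen by intel_ring_sync_index() (added to intel_ringbuffer.h below) encoded into the command, so the ring stalls until the addressed mailbox reaches the awaited seqno. A user-space cross-check that the two index computations address the same mailbox slot:

#include <assert.h>
#include <stdio.h>

#define NUM_RINGS 3   /* RCS=0, VCS=1, BCS=2, as in intel_ringbuffer.h below */

/* Slot of ring `id`'s mailbox pair that signaller `s` writes with
 * MI_SEMAPHORE_UPDATE (the id/i arithmetic from update_semaphore() above). */
static int signal_slot(int s, int id)
{
        for (int i = 0; i < 2; i++)
                if ((s + 2 - i) % NUM_RINGS == id)
                        return i;
        return -1;            /* a ring never signals itself */
}

/* Slot of the waiter's own mailbox pair that MI_SEMAPHORE_COMPARE polls
 * (intel_ring_sync_index() from intel_ringbuffer.h below). */
static int wait_slot(int waiter, int signaller)
{
        int idx = signaller - waiter - 1;
        if (idx < 0)
                idx += NUM_RINGS;
        return idx;
}

int main(void)
{
        for (int s = 0; s < NUM_RINGS; s++)
                for (int w = 0; w < NUM_RINGS; w++) {
                        if (s == w)
                                continue;
                        /* both sides must agree which mailbox carries s's seqno */
                        assert(signal_slot(s, w) == wait_slot(w, s));
                        printf("ring %d -> ring %d via sync slot %d\n",
                               s, w, wait_slot(w, s));
                }
        return 0;
}

Both sides agree for every ordered pair of rings, which is what lets a single pair of sync registers per ring cover the other two.
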
@@ -317,131 +393,128 @@ do { \
317 intel_ring_emit(ring__, 0); \ 393 intel_ring_emit(ring__, 0); \
318} while (0) 394} while (0)
319 395
320/**
321 * Creates a new sequence number, emitting a write of it to the status page
322 * plus an interrupt, which will trigger i915_user_interrupt_handler.
323 *
324 * Must be called with struct_lock held.
325 *
326 * Returned sequence numbers are nonzero on success.
327 */
328static int 396static int
329render_ring_add_request(struct intel_ring_buffer *ring, 397pc_render_add_request(struct intel_ring_buffer *ring,
330 u32 *result) 398 u32 *result)
331{ 399{
332 struct drm_device *dev = ring->dev; 400 struct drm_device *dev = ring->dev;
333 u32 seqno = i915_gem_get_seqno(dev); 401 u32 seqno = i915_gem_get_seqno(dev);
334 struct pipe_control *pc = ring->private; 402 struct pipe_control *pc = ring->private;
403 u32 scratch_addr = pc->gtt_offset + 128;
335 int ret; 404 int ret;
336 405
337 if (IS_GEN6(dev)) { 406 /*
338 ret = intel_ring_begin(ring, 6); 407 * Workaround qword write incoherence by flushing the
339 if (ret) 408 * PIPE_NOTIFY buffers out to memory before requesting
340 return ret; 409 * an interrupt.
341 410 */
342 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | 3); 411 ret = intel_ring_begin(ring, 32);
343 intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE | 412 if (ret)
344 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH | 413 return ret;
345 PIPE_CONTROL_NOTIFY);
346 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
347 intel_ring_emit(ring, seqno);
348 intel_ring_emit(ring, 0);
349 intel_ring_emit(ring, 0);
350 } else if (HAS_PIPE_CONTROL(dev)) {
351 u32 scratch_addr = pc->gtt_offset + 128;
352 414
353 /* 415 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
354 * Workaround qword write incoherence by flushing the 416 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
355 * PIPE_NOTIFY buffers out to memory before requesting 417 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
356 * an interrupt. 418 intel_ring_emit(ring, seqno);
357 */ 419 intel_ring_emit(ring, 0);
358 ret = intel_ring_begin(ring, 32); 420 PIPE_CONTROL_FLUSH(ring, scratch_addr);
359 if (ret) 421 scratch_addr += 128; /* write to separate cachelines */
360 return ret; 422 PIPE_CONTROL_FLUSH(ring, scratch_addr);
423 scratch_addr += 128;
424 PIPE_CONTROL_FLUSH(ring, scratch_addr);
425 scratch_addr += 128;
426 PIPE_CONTROL_FLUSH(ring, scratch_addr);
427 scratch_addr += 128;
428 PIPE_CONTROL_FLUSH(ring, scratch_addr);
429 scratch_addr += 128;
430 PIPE_CONTROL_FLUSH(ring, scratch_addr);
431 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
432 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
433 PIPE_CONTROL_NOTIFY);
434 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
435 intel_ring_emit(ring, seqno);
436 intel_ring_emit(ring, 0);
437 intel_ring_advance(ring);
361 438
362 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | 439 *result = seqno;
363 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); 440 return 0;
364 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 441}
365 intel_ring_emit(ring, seqno);
366 intel_ring_emit(ring, 0);
367 PIPE_CONTROL_FLUSH(ring, scratch_addr);
368 scratch_addr += 128; /* write to separate cachelines */
369 PIPE_CONTROL_FLUSH(ring, scratch_addr);
370 scratch_addr += 128;
371 PIPE_CONTROL_FLUSH(ring, scratch_addr);
372 scratch_addr += 128;
373 PIPE_CONTROL_FLUSH(ring, scratch_addr);
374 scratch_addr += 128;
375 PIPE_CONTROL_FLUSH(ring, scratch_addr);
376 scratch_addr += 128;
377 PIPE_CONTROL_FLUSH(ring, scratch_addr);
378 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
379 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
380 PIPE_CONTROL_NOTIFY);
381 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
382 intel_ring_emit(ring, seqno);
383 intel_ring_emit(ring, 0);
384 } else {
385 ret = intel_ring_begin(ring, 4);
386 if (ret)
387 return ret;
388 442
389 intel_ring_emit(ring, MI_STORE_DWORD_INDEX); 443static int
390 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 444render_ring_add_request(struct intel_ring_buffer *ring,
391 intel_ring_emit(ring, seqno); 445 u32 *result)
446{
447 struct drm_device *dev = ring->dev;
448 u32 seqno = i915_gem_get_seqno(dev);
449 int ret;
392 450
393 intel_ring_emit(ring, MI_USER_INTERRUPT); 451 ret = intel_ring_begin(ring, 4);
394 } 452 if (ret)
453 return ret;
395 454
455 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
456 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
457 intel_ring_emit(ring, seqno);
458 intel_ring_emit(ring, MI_USER_INTERRUPT);
396 intel_ring_advance(ring); 459 intel_ring_advance(ring);
460
397 *result = seqno; 461 *result = seqno;
398 return 0; 462 return 0;
399} 463}
400 464
401static u32 465static u32
402render_ring_get_seqno(struct intel_ring_buffer *ring) 466ring_get_seqno(struct intel_ring_buffer *ring)
403{ 467{
404 struct drm_device *dev = ring->dev; 468 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
405 if (HAS_PIPE_CONTROL(dev)) { 469}
406 struct pipe_control *pc = ring->private; 470
407 return pc->cpu_page[0]; 471static u32
408 } else 472pc_render_get_seqno(struct intel_ring_buffer *ring)
409 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 473{
474 struct pipe_control *pc = ring->private;
475 return pc->cpu_page[0];
410} 476}
411 477
412static void 478static void
413render_ring_get_user_irq(struct intel_ring_buffer *ring) 479render_ring_get_irq(struct intel_ring_buffer *ring)
414{ 480{
415 struct drm_device *dev = ring->dev; 481 struct drm_device *dev = ring->dev;
416 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
417 unsigned long irqflags;
418 482
419 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 483 if (dev->irq_enabled && ++ring->irq_refcount == 1) {
420 if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) { 484 drm_i915_private_t *dev_priv = dev->dev_private;
485 unsigned long irqflags;
486
487 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
488
421 if (HAS_PCH_SPLIT(dev)) 489 if (HAS_PCH_SPLIT(dev))
422 ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); 490 ironlake_enable_graphics_irq(dev_priv,
491 GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
423 else 492 else
424 i915_enable_irq(dev_priv, I915_USER_INTERRUPT); 493 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
494
495 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
425 } 496 }
426 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
427} 497}
428 498
429static void 499static void
430render_ring_put_user_irq(struct intel_ring_buffer *ring) 500render_ring_put_irq(struct intel_ring_buffer *ring)
431{ 501{
432 struct drm_device *dev = ring->dev; 502 struct drm_device *dev = ring->dev;
433 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
434 unsigned long irqflags;
435 503
436 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 504 BUG_ON(dev->irq_enabled && ring->irq_refcount == 0);
437 BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0); 505 if (dev->irq_enabled && --ring->irq_refcount == 0) {
438 if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) { 506 drm_i915_private_t *dev_priv = dev->dev_private;
507 unsigned long irqflags;
508
509 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
439 if (HAS_PCH_SPLIT(dev)) 510 if (HAS_PCH_SPLIT(dev))
440 ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); 511 ironlake_disable_graphics_irq(dev_priv,
512 GT_USER_INTERRUPT |
513 GT_PIPE_NOTIFY);
441 else 514 else
442 i915_disable_irq(dev_priv, I915_USER_INTERRUPT); 515 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
516 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
443 } 517 }
444 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
445} 518}
446 519
447void intel_ring_setup_status_page(struct intel_ring_buffer *ring) 520void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
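
pc_render_add_request() above keeps the pre-Gen6 PIPE_CONTROL path (Gen6 now goes through gen6_add_request() instead): before the qword write that raises PIPE_CONTROL_NOTIFY, it emits a run of dummy qword writes to the scratch page, stepping the target address by 128 bytes so each flush lands in its own cacheline. The unrolled sequence is equivalent to a loop of roughly this shape (a sketch; the emit macro is reduced to a print and the scratch offset is illustrative):

#include <stdio.h>

#define CACHELINE 128   /* the stride used above: "write to separate cachelines" */

static void pipe_control_flush(unsigned int addr)
{
        /* stands in for the PIPE_CONTROL_FLUSH() emit macro */
        printf("PIPE_CONTROL QW_WRITE -> %#x\n", addr);
}

int main(void)
{
        unsigned int gtt_offset = 0x1000;            /* illustrative scratch offset */
        unsigned int scratch_addr = gtt_offset + CACHELINE;

        for (int i = 0; i < 6; i++) {                /* six dummy flushes, as unrolled above */
                pipe_control_flush(scratch_addr);
                scratch_addr += CACHELINE;
        }
        /* ...then the real QW_WRITE of the seqno with PIPE_CONTROL_NOTIFY. */
        return 0;
}
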
@@ -459,6 +532,9 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
459 u32 invalidate_domains, 532 u32 invalidate_domains,
460 u32 flush_domains) 533 u32 flush_domains)
461{ 534{
535 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
536 return;
537
462 if (intel_ring_begin(ring, 2) == 0) { 538 if (intel_ring_begin(ring, 2) == 0) {
463 intel_ring_emit(ring, MI_FLUSH); 539 intel_ring_emit(ring, MI_FLUSH);
464 intel_ring_emit(ring, MI_NOOP); 540 intel_ring_emit(ring, MI_NOOP);
@@ -491,20 +567,45 @@ ring_add_request(struct intel_ring_buffer *ring,
491} 567}
492 568
493static void 569static void
494bsd_ring_get_user_irq(struct intel_ring_buffer *ring) 570ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
495{ 571{
496 /* do nothing */ 572 struct drm_device *dev = ring->dev;
573
574 if (dev->irq_enabled && ++ring->irq_refcount == 1) {
575 drm_i915_private_t *dev_priv = dev->dev_private;
576 unsigned long irqflags;
577
578 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
579 ironlake_enable_graphics_irq(dev_priv, flag);
580 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
581 }
497} 582}
583
498static void 584static void
499bsd_ring_put_user_irq(struct intel_ring_buffer *ring) 585ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
500{ 586{
501 /* do nothing */ 587 struct drm_device *dev = ring->dev;
588
589 if (dev->irq_enabled && --ring->irq_refcount == 0) {
590 drm_i915_private_t *dev_priv = dev->dev_private;
591 unsigned long irqflags;
592
593 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
594 ironlake_disable_graphics_irq(dev_priv, flag);
595 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
596 }
502} 597}
503 598
504static u32 599
505ring_status_page_get_seqno(struct intel_ring_buffer *ring) 600static void
601bsd_ring_get_irq(struct intel_ring_buffer *ring)
506{ 602{
507 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 603 ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
604}
605static void
606bsd_ring_put_irq(struct intel_ring_buffer *ring)
607{
608 ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
508} 609}
509 610
510static int 611static int
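
ring_get_irq()/ring_put_irq() above replace the per-ring no-op stubs with a shared refcounted enable/disable: the ring's GT interrupt bit is unmasked only on the 0 -> 1 transition and masked again on 1 -> 0, with dev_priv->irq_lock taken just for those transitions. A toy model of that discipline (the dev->irq_enabled check and the spinlock are omitted, and the mask bit is a plain bool):

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the refcounting above: the (un)mask work happens only on the
 * 0 -> 1 and 1 -> 0 transitions, every other get/put just adjusts the count. */
struct ring_irq {
        int  refcount;
        bool unmasked;          /* stands in for this ring's GT interrupt mask bit */
};

static void irq_get(struct ring_irq *r)
{
        if (++r->refcount == 1)
                r->unmasked = true;     /* ironlake_enable_graphics_irq(flag) */
}

static void irq_put(struct ring_irq *r)
{
        if (--r->refcount == 0)
                r->unmasked = false;    /* ironlake_disable_graphics_irq(flag) */
}

int main(void)
{
        struct ring_irq r = { 0, false };

        irq_get(&r);            /* first waiter unmasks the interrupt */
        irq_get(&r);            /* second waiter only bumps the count */
        irq_put(&r);
        printf("refcount=%d unmasked=%d\n", r.refcount, r.unmasked);  /* 1 1 */
        irq_put(&r);            /* last waiter masks it again */
        printf("refcount=%d unmasked=%d\n", r.refcount, r.unmasked);  /* 0 0 */
        return 0;
}
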
@@ -817,9 +918,9 @@ static const struct intel_ring_buffer render_ring = {
817 .write_tail = ring_write_tail, 918 .write_tail = ring_write_tail,
818 .flush = render_ring_flush, 919 .flush = render_ring_flush,
819 .add_request = render_ring_add_request, 920 .add_request = render_ring_add_request,
820 .get_seqno = render_ring_get_seqno, 921 .get_seqno = ring_get_seqno,
821 .user_irq_get = render_ring_get_user_irq, 922 .irq_get = render_ring_get_irq,
822 .user_irq_put = render_ring_put_user_irq, 923 .irq_put = render_ring_put_irq,
823 .dispatch_execbuffer = render_ring_dispatch_execbuffer, 924 .dispatch_execbuffer = render_ring_dispatch_execbuffer,
824 .cleanup = render_ring_cleanup, 925 .cleanup = render_ring_cleanup,
825}; 926};
@@ -835,9 +936,9 @@ static const struct intel_ring_buffer bsd_ring = {
835 .write_tail = ring_write_tail, 936 .write_tail = ring_write_tail,
836 .flush = bsd_ring_flush, 937 .flush = bsd_ring_flush,
837 .add_request = ring_add_request, 938 .add_request = ring_add_request,
838 .get_seqno = ring_status_page_get_seqno, 939 .get_seqno = ring_get_seqno,
839 .user_irq_get = bsd_ring_get_user_irq, 940 .irq_get = bsd_ring_get_irq,
840 .user_irq_put = bsd_ring_put_user_irq, 941 .irq_put = bsd_ring_put_irq,
841 .dispatch_execbuffer = ring_dispatch_execbuffer, 942 .dispatch_execbuffer = ring_dispatch_execbuffer,
842}; 943};
843 944
@@ -868,6 +969,9 @@ static void gen6_ring_flush(struct intel_ring_buffer *ring,
868 u32 invalidate_domains, 969 u32 invalidate_domains,
869 u32 flush_domains) 970 u32 flush_domains)
870{ 971{
972 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
973 return;
974
871 if (intel_ring_begin(ring, 4) == 0) { 975 if (intel_ring_begin(ring, 4) == 0) {
872 intel_ring_emit(ring, MI_FLUSH_DW); 976 intel_ring_emit(ring, MI_FLUSH_DW);
873 intel_ring_emit(ring, 0); 977 intel_ring_emit(ring, 0);
@@ -895,33 +999,46 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
895 return 0; 999 return 0;
896} 1000}
897 1001
1002static void
1003gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
1004{
1005 ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
1006}
1007
1008static void
1009gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
1010{
1011 ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
1012}
1013
898/* ring buffer for Video Codec for Gen6+ */ 1014/* ring buffer for Video Codec for Gen6+ */
899static const struct intel_ring_buffer gen6_bsd_ring = { 1015static const struct intel_ring_buffer gen6_bsd_ring = {
900 .name = "gen6 bsd ring", 1016 .name = "gen6 bsd ring",
901 .id = RING_BSD, 1017 .id = RING_BSD,
902 .mmio_base = GEN6_BSD_RING_BASE, 1018 .mmio_base = GEN6_BSD_RING_BASE,
903 .size = 32 * PAGE_SIZE, 1019 .size = 32 * PAGE_SIZE,
904 .init = init_ring_common, 1020 .init = init_ring_common,
905 .write_tail = gen6_bsd_ring_write_tail, 1021 .write_tail = gen6_bsd_ring_write_tail,
906 .flush = gen6_ring_flush, 1022 .flush = gen6_ring_flush,
907 .add_request = ring_add_request, 1023 .add_request = gen6_add_request,
908 .get_seqno = ring_status_page_get_seqno, 1024 .get_seqno = ring_get_seqno,
909 .user_irq_get = bsd_ring_get_user_irq, 1025 .irq_get = gen6_bsd_ring_get_irq,
910 .user_irq_put = bsd_ring_put_user_irq, 1026 .irq_put = gen6_bsd_ring_put_irq,
911 .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, 1027 .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
912}; 1028};
913 1029
914/* Blitter support (SandyBridge+) */ 1030/* Blitter support (SandyBridge+) */
915 1031
916static void 1032static void
917blt_ring_get_user_irq(struct intel_ring_buffer *ring) 1033blt_ring_get_irq(struct intel_ring_buffer *ring)
918{ 1034{
919 /* do nothing */ 1035 ring_get_irq(ring, GT_BLT_USER_INTERRUPT);
920} 1036}
1037
921static void 1038static void
922blt_ring_put_user_irq(struct intel_ring_buffer *ring) 1039blt_ring_put_irq(struct intel_ring_buffer *ring)
923{ 1040{
924 /* do nothing */ 1041 ring_put_irq(ring, GT_BLT_USER_INTERRUPT);
925} 1042}
926 1043
927 1044
@@ -994,6 +1111,9 @@ static void blt_ring_flush(struct intel_ring_buffer *ring,
994 u32 invalidate_domains, 1111 u32 invalidate_domains,
995 u32 flush_domains) 1112 u32 flush_domains)
996{ 1113{
1114 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
1115 return;
1116
997 if (blt_ring_begin(ring, 4) == 0) { 1117 if (blt_ring_begin(ring, 4) == 0) {
998 intel_ring_emit(ring, MI_FLUSH_DW); 1118 intel_ring_emit(ring, MI_FLUSH_DW);
999 intel_ring_emit(ring, 0); 1119 intel_ring_emit(ring, 0);
@@ -1003,30 +1123,6 @@ static void blt_ring_flush(struct intel_ring_buffer *ring,
1003 } 1123 }
1004} 1124}
1005 1125
1006static int
1007blt_ring_add_request(struct intel_ring_buffer *ring,
1008 u32 *result)
1009{
1010 u32 seqno;
1011 int ret;
1012
1013 ret = blt_ring_begin(ring, 4);
1014 if (ret)
1015 return ret;
1016
1017 seqno = i915_gem_get_seqno(ring->dev);
1018
1019 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
1020 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1021 intel_ring_emit(ring, seqno);
1022 intel_ring_emit(ring, MI_USER_INTERRUPT);
1023 intel_ring_advance(ring);
1024
1025 DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
1026 *result = seqno;
1027 return 0;
1028}
1029
1030static void blt_ring_cleanup(struct intel_ring_buffer *ring) 1126static void blt_ring_cleanup(struct intel_ring_buffer *ring)
1031{ 1127{
1032 if (!ring->private) 1128 if (!ring->private)
@@ -1045,10 +1141,10 @@ static const struct intel_ring_buffer gen6_blt_ring = {
1045 .init = blt_ring_init, 1141 .init = blt_ring_init,
1046 .write_tail = ring_write_tail, 1142 .write_tail = ring_write_tail,
1047 .flush = blt_ring_flush, 1143 .flush = blt_ring_flush,
1048 .add_request = blt_ring_add_request, 1144 .add_request = gen6_add_request,
1049 .get_seqno = ring_status_page_get_seqno, 1145 .get_seqno = ring_get_seqno,
1050 .user_irq_get = blt_ring_get_user_irq, 1146 .irq_get = blt_ring_get_irq,
1051 .user_irq_put = blt_ring_put_user_irq, 1147 .irq_put = blt_ring_put_irq,
1052 .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, 1148 .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
1053 .cleanup = blt_ring_cleanup, 1149 .cleanup = blt_ring_cleanup,
1054}; 1150};
@@ -1056,36 +1152,43 @@ static const struct intel_ring_buffer gen6_blt_ring = {
1056int intel_init_render_ring_buffer(struct drm_device *dev) 1152int intel_init_render_ring_buffer(struct drm_device *dev)
1057{ 1153{
1058 drm_i915_private_t *dev_priv = dev->dev_private; 1154 drm_i915_private_t *dev_priv = dev->dev_private;
1155 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1059 1156
1060 dev_priv->render_ring = render_ring; 1157 *ring = render_ring;
1158 if (INTEL_INFO(dev)->gen >= 6) {
1159 ring->add_request = gen6_add_request;
1160 } else if (HAS_PIPE_CONTROL(dev)) {
1161 ring->add_request = pc_render_add_request;
1162 ring->get_seqno = pc_render_get_seqno;
1163 }
1061 1164
1062 if (!I915_NEED_GFX_HWS(dev)) { 1165 if (!I915_NEED_GFX_HWS(dev)) {
1063 dev_priv->render_ring.status_page.page_addr 1166 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1064 = dev_priv->status_page_dmah->vaddr; 1167 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1065 memset(dev_priv->render_ring.status_page.page_addr,
1066 0, PAGE_SIZE);
1067 } 1168 }
1068 1169
1069 return intel_init_ring_buffer(dev, &dev_priv->render_ring); 1170 return intel_init_ring_buffer(dev, ring);
1070} 1171}
1071 1172
1072int intel_init_bsd_ring_buffer(struct drm_device *dev) 1173int intel_init_bsd_ring_buffer(struct drm_device *dev)
1073{ 1174{
1074 drm_i915_private_t *dev_priv = dev->dev_private; 1175 drm_i915_private_t *dev_priv = dev->dev_private;
1176 struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
1075 1177
1076 if (IS_GEN6(dev)) 1178 if (IS_GEN6(dev))
1077 dev_priv->bsd_ring = gen6_bsd_ring; 1179 *ring = gen6_bsd_ring;
1078 else 1180 else
1079 dev_priv->bsd_ring = bsd_ring; 1181 *ring = bsd_ring;
1080 1182
1081 return intel_init_ring_buffer(dev, &dev_priv->bsd_ring); 1183 return intel_init_ring_buffer(dev, ring);
1082} 1184}
1083 1185
1084int intel_init_blt_ring_buffer(struct drm_device *dev) 1186int intel_init_blt_ring_buffer(struct drm_device *dev)
1085{ 1187{
1086 drm_i915_private_t *dev_priv = dev->dev_private; 1188 drm_i915_private_t *dev_priv = dev->dev_private;
1189 struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
1087 1190
1088 dev_priv->blt_ring = gen6_blt_ring; 1191 *ring = gen6_blt_ring;
1089 1192
1090 return intel_init_ring_buffer(dev, &dev_priv->blt_ring); 1193 return intel_init_ring_buffer(dev, ring);
1091} 1194}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 8e3526777926..6a3822bc6af2 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,6 +1,13 @@
1#ifndef _INTEL_RINGBUFFER_H_ 1#ifndef _INTEL_RINGBUFFER_H_
2#define _INTEL_RINGBUFFER_H_ 2#define _INTEL_RINGBUFFER_H_
3 3
4enum {
5 RCS = 0x0,
6 VCS,
7 BCS,
8 I915_NUM_RINGS,
9};
10
4struct intel_hw_status_page { 11struct intel_hw_status_page {
5 u32 __iomem *page_addr; 12 u32 __iomem *page_addr;
6 unsigned int gfx_addr; 13 unsigned int gfx_addr;
@@ -21,7 +28,10 @@ struct intel_hw_status_page {
21#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL(ring->mmio_base)) 28#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL(ring->mmio_base))
22#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val) 29#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)
23 30
24struct drm_i915_gem_execbuffer2; 31#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID(ring->mmio_base))
32#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0(ring->mmio_base))
33#define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1(ring->mmio_base))
34
25struct intel_ring_buffer { 35struct intel_ring_buffer {
26 const char *name; 36 const char *name;
27 enum intel_ring_id { 37 enum intel_ring_id {
@@ -42,9 +52,10 @@ struct intel_ring_buffer {
42 52
43 u32 irq_seqno; /* last seq seem at irq time */ 53 u32 irq_seqno; /* last seq seem at irq time */
44 u32 waiting_seqno; 54 u32 waiting_seqno;
45 int user_irq_refcount; 55 u32 sync_seqno[I915_NUM_RINGS-1];
46 void (*user_irq_get)(struct intel_ring_buffer *ring); 56 u32 irq_refcount;
47 void (*user_irq_put)(struct intel_ring_buffer *ring); 57 void (*irq_get)(struct intel_ring_buffer *ring);
58 void (*irq_put)(struct intel_ring_buffer *ring);
48 59
49 int (*init)(struct intel_ring_buffer *ring); 60 int (*init)(struct intel_ring_buffer *ring);
50 61
@@ -99,6 +110,25 @@ struct intel_ring_buffer {
99}; 110};
100 111
101static inline u32 112static inline u32
113intel_ring_sync_index(struct intel_ring_buffer *ring,
114 struct intel_ring_buffer *other)
115{
116 int idx;
117
118 /*
119 * cs -> 0 = vcs, 1 = bcs
120 * vcs -> 0 = bcs, 1 = cs,
121 * bcs -> 0 = cs, 1 = vcs.
122 */
123
124 idx = (other - ring) - 1;
125 if (idx < 0)
126 idx += I915_NUM_RINGS;
127
128 return idx;
129}
130
131static inline u32
102intel_read_status_page(struct intel_ring_buffer *ring, 132intel_read_status_page(struct intel_ring_buffer *ring,
103 int reg) 133 int reg)
104{ 134{
@@ -119,6 +149,9 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
119void intel_ring_advance(struct intel_ring_buffer *ring); 149void intel_ring_advance(struct intel_ring_buffer *ring);
120 150
121u32 intel_ring_get_seqno(struct intel_ring_buffer *ring); 151u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
152int intel_ring_sync(struct intel_ring_buffer *ring,
153 struct intel_ring_buffer *to,
154 u32 seqno);
122 155
123int intel_init_render_ring_buffer(struct drm_device *dev); 156int intel_init_render_ring_buffer(struct drm_device *dev);
124int intel_init_bsd_ring_buffer(struct drm_device *dev); 157int intel_init_bsd_ring_buffer(struct drm_device *dev);
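
The new sync_seqno[I915_NUM_RINGS-1] field gives each ring one slot per foreign ring, indexed by intel_ring_sync_index() above. Its caller sits in i915_gem_execbuffer.c, which is not part of this section, so the following is only a hypothetical sketch of how such a cache can be used: skip emitting a semaphore wait when the waiter has already synchronised past the requested seqno from that ring.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define I915_NUM_RINGS 3

struct ring {
        int      id;                               /* RCS=0, VCS=1, BCS=2 */
        uint32_t sync_seqno[I915_NUM_RINGS - 1];   /* last seqno waited on, per foreign ring */
};

static int sync_index(const struct ring *ring, const struct ring *other)
{
        int idx = (other->id - ring->id) - 1;      /* mirrors intel_ring_sync_index() */
        if (idx < 0)
                idx += I915_NUM_RINGS;
        return idx;
}

/* Hypothetical caller: only emit MI_SEMAPHORE_COMPARE when this ring has not
 * already waited for an equal or later seqno from `to`. */
static bool need_semaphore_wait(struct ring *waiter, const struct ring *to, uint32_t seqno)
{
        int idx = sync_index(waiter, to);

        if ((int32_t)(seqno - waiter->sync_seqno[idx]) <= 0)
                return false;                      /* already ordered past `seqno` */
        waiter->sync_seqno[idx] = seqno;
        return true;
}

int main(void)
{
        struct ring rcs = { .id = 0 }, bcs = { .id = 2 };

        printf("%d\n", need_semaphore_wait(&rcs, &bcs, 10));  /* 1: emit the wait */
        printf("%d\n", need_semaphore_wait(&rcs, &bcs, 8));   /* 0: already covered */
        return 0;
}
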
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 2f7681989316..93206e4eaa6f 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1245,10 +1245,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
1245 int type; 1245 int type;
1246 1246
1247 /* Disable TV interrupts around load detect or we'll recurse */ 1247 /* Disable TV interrupts around load detect or we'll recurse */
1248 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1248 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1249 i915_disable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE | 1249 i915_disable_pipestat(dev_priv, 0,
1250 PIPE_HOTPLUG_INTERRUPT_ENABLE |
1250 PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); 1251 PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
1251 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 1252 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1252 1253
1253 save_tv_dac = tv_dac = I915_READ(TV_DAC); 1254 save_tv_dac = tv_dac = I915_READ(TV_DAC);
1254 save_tv_ctl = tv_ctl = I915_READ(TV_CTL); 1255 save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
@@ -1301,10 +1302,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
1301 I915_WRITE(TV_CTL, save_tv_ctl); 1302 I915_WRITE(TV_CTL, save_tv_ctl);
1302 1303
1303 /* Restore interrupt config */ 1304 /* Restore interrupt config */
1304 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1305 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1305 i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE | 1306 i915_enable_pipestat(dev_priv, 0,
1307 PIPE_HOTPLUG_INTERRUPT_ENABLE |
1306 PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); 1308 PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
1307 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 1309 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1308 1310
1309 return type; 1311 return type;
1310} 1312}