author    Chris Wilson <chris@chris-wilson.co.uk>  2010-12-04 06:30:53 -0500
committer Chris Wilson <chris@chris-wilson.co.uk>  2010-12-04 19:37:38 -0500
commit    1ec14ad3132702694f2e1a90b30641cf111183b9 (patch)
tree      98ca9ae91f14ff5d8feed306941ea2c46479e71a /drivers/gpu/drm/i915/i915_dma.c
parent    340479aac697bc73e225c122a9753d4964eeda3f (diff)
drm/i915: Implement GPU semaphores for inter-ring synchronisation on SNB
The bulk of the change is to convert the growing list of rings into an array so that the relationship between the rings and the semaphore sync registers can be easily computed.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
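To make the shape of the change concrete, here is a small stand-alone C sketch, not the driver's real definitions: the struct layout, NUM_RINGS value, ring names and the sync_slot() helper are illustrative only, and LP_RING() merely mimics the accessor that appears throughout the diff below. It shows why replacing named render/bsd/blt members with an indexed array turns per-ring work into a loop and lets a per-pair property, such as a semaphore sync slot, be computed directly from the two ring indices.

/*
 * Illustrative sketch only -- not the kernel's definitions.  It assumes a
 * hypothetical three-ring layout (render, bsd, blt) to show how indexing
 * the rings as an array makes per-pair bookkeeping, such as semaphore
 * sync registers, a simple function of the two ring indices.
 */
#include <stdio.h>

#define NUM_RINGS 3

struct ring {
	const char *name;
	int id;			/* index into the ring[] array */
};

/* Before: one named member per ring forces open-coded handling everywhere.
 * After: an array lets common code loop over every ring. */
struct dev_priv {
	struct ring ring[NUM_RINGS];
};

/* Legacy/default ring accessor, in the spirit of the LP_RING() used in the
 * diff (the real macro lives in the i915 headers). */
#define LP_RING(d) (&(d)->ring[0])

/* With an array, each ordered (signaller, waiter) pair maps to a slot in a
 * small table, e.g. a flattened NUM_RINGS x NUM_RINGS matrix. */
static int sync_slot(const struct ring *signaller, const struct ring *waiter)
{
	return signaller->id * NUM_RINGS + waiter->id;
}

int main(void)
{
	struct dev_priv d = {
		.ring = {
			{ "render", 0 },
			{ "bsd",    1 },
			{ "blt",    2 },
		},
	};
	int i, j;

	printf("LP ring: %s\n", LP_RING(&d)->name);

	/* Cleanup and similar bookkeeping become a loop over the array
	 * instead of three hand-written copies. */
	for (i = 0; i < NUM_RINGS; i++)
		for (j = 0; j < NUM_RINGS; j++)
			if (i != j)
				printf("%s -> %s uses sync slot %d\n",
				       d.ring[i].name, d.ring[j].name,
				       sync_slot(&d.ring[i], &d.ring[j]));
	return 0;
}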
Diffstat (limited to 'drivers/gpu/drm/i915/i915_dma.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c  60
1 file changed, 31 insertions(+), 29 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 9a22da9b2083..664300986fb4 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -49,6 +49,8 @@
 static int i915_init_phys_hws(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
 	/* Program Hardware Status Page */
 	dev_priv->status_page_dmah =
 		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
@@ -57,11 +59,10 @@ static int i915_init_phys_hws(struct drm_device *dev)
 		DRM_ERROR("Can not allocate hardware status page\n");
 		return -ENOMEM;
 	}
-	dev_priv->render_ring.status_page.page_addr
-		= dev_priv->status_page_dmah->vaddr;
+	ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
 	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
 
-	memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
+	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
 	if (INTEL_INFO(dev)->gen >= 4)
 		dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
@@ -79,13 +80,15 @@ static int i915_init_phys_hws(struct drm_device *dev)
 static void i915_free_hws(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
 	if (dev_priv->status_page_dmah) {
 		drm_pci_free(dev, dev_priv->status_page_dmah);
 		dev_priv->status_page_dmah = NULL;
 	}
 
-	if (dev_priv->render_ring.status_page.gfx_addr) {
-		dev_priv->render_ring.status_page.gfx_addr = 0;
+	if (ring->status_page.gfx_addr) {
+		ring->status_page.gfx_addr = 0;
 		drm_core_ioremapfree(&dev_priv->hws_map, dev);
 	}
 
@@ -97,7 +100,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
-	struct intel_ring_buffer *ring = &dev_priv->render_ring;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
 	/*
 	 * We should never lose context on the ring with modesetting
@@ -123,6 +126,8 @@ void i915_kernel_lost_context(struct drm_device * dev)
 static int i915_dma_cleanup(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i;
+
 	/* Make sure interrupts are disabled here because the uninstall ioctl
 	 * may not have been called from userspace and after dev_private
 	 * is freed, it's too late.
@@ -131,9 +136,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
 		drm_irq_uninstall(dev);
 
 	mutex_lock(&dev->struct_mutex);
-	intel_cleanup_ring_buffer(&dev_priv->render_ring);
-	intel_cleanup_ring_buffer(&dev_priv->bsd_ring);
-	intel_cleanup_ring_buffer(&dev_priv->blt_ring);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
 	mutex_unlock(&dev->struct_mutex);
 
 	/* Clear the HWS virtual address at teardown */
@@ -147,6 +151,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
 	master_priv->sarea = drm_getsarea(dev);
 	if (master_priv->sarea) {
@@ -157,24 +162,24 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 	}
 
 	if (init->ring_size != 0) {
-		if (dev_priv->render_ring.obj != NULL) {
+		if (ring->obj != NULL) {
 			i915_dma_cleanup(dev);
 			DRM_ERROR("Client tried to initialize ringbuffer in "
 				  "GEM mode\n");
 			return -EINVAL;
 		}
 
-		dev_priv->render_ring.size = init->ring_size;
+		ring->size = init->ring_size;
 
-		dev_priv->render_ring.map.offset = init->ring_start;
-		dev_priv->render_ring.map.size = init->ring_size;
-		dev_priv->render_ring.map.type = 0;
-		dev_priv->render_ring.map.flags = 0;
-		dev_priv->render_ring.map.mtrr = 0;
+		ring->map.offset = init->ring_start;
+		ring->map.size = init->ring_size;
+		ring->map.type = 0;
+		ring->map.flags = 0;
+		ring->map.mtrr = 0;
 
-		drm_core_ioremap_wc(&dev_priv->render_ring.map, dev);
+		drm_core_ioremap_wc(&ring->map, dev);
 
-		if (dev_priv->render_ring.map.handle == NULL) {
+		if (ring->map.handle == NULL) {
 			i915_dma_cleanup(dev);
 			DRM_ERROR("can not ioremap virtual address for"
 				  " ring buffer\n");
@@ -182,7 +187,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 		}
 	}
 
-	dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle;
+	ring->virtual_start = ring->map.handle;
 
 	dev_priv->cpp = init->cpp;
 	dev_priv->back_offset = init->back_offset;
@@ -201,12 +206,10 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 static int i915_dma_resume(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
-	struct intel_ring_buffer *ring;
 	DRM_DEBUG_DRIVER("%s\n", __func__);
 
-	ring = &dev_priv->render_ring;
-
 	if (ring->map.handle == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
@@ -326,7 +329,7 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int i, ret;
 
-	if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
+	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
 		return -EINVAL;
 
 	for (i = 0; i < dwords;) {
@@ -565,13 +568,12 @@ static int i915_dispatch_flip(struct drm_device * dev)
 	return 0;
 }
 
-static int i915_quiescent(struct drm_device * dev)
+static int i915_quiescent(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
 
 	i915_kernel_lost_context(dev);
-	return intel_wait_ring_buffer(&dev_priv->render_ring,
-				      dev_priv->render_ring.size - 8);
+	return intel_wait_ring_buffer(ring, ring->size - 8);
 }
 
 static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -828,7 +830,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_hws_addr_t *hws = data;
-	struct intel_ring_buffer *ring = &dev_priv->render_ring;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
 	if (!I915_NEED_GFX_HWS(dev))
 		return -EINVAL;
@@ -1978,7 +1980,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (!IS_I945G(dev) && !IS_I945GM(dev))
 		pci_enable_msi(dev->pdev);
 
-	spin_lock_init(&dev_priv->user_irq_lock);
+	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->error_lock);
 	dev_priv->trace_irq_seqno = 0;
 