author	Zou Nan hai <nanhai.zou@intel.com>	2010-05-20 21:08:55 -0400
committer	Eric Anholt <eric@anholt.net>	2010-05-26 16:24:49 -0400
commit	8187a2b70e34c727a06617441f74f202b6fefaf9 (patch)
tree	48622c6f95282dc0a0fa668110aac4efa6e89066 /drivers/gpu/drm/i915/intel_ringbuffer.c
parent	d3301d86b4bf2bcf649982ae464211d8bcf9575a (diff)
drm/i915: introduce intel_ring_buffer structure (V2)

Introduces a more complete intel_ring_buffer structure with callbacks
for setup and management of a particular ringbuffer, and converts the
render ring buffer consumers to use it.

Signed-off-by: Zou Nan hai <nanhai.zou@intel.com>
Signed-off-by: Xiang Hai hao <haihao.xiang@intel.com>
[anholt: Fixed up whitespace fail and rebased against prep patches]
Signed-off-by: Eric Anholt <eric@anholt.net>
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c	582
1 file changed, 349 insertions(+), 233 deletions(-)
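The intel_ring_buffer structure itself is declared in intel_ringbuffer.h, which is outside this file's diffstat. For orientation, a rough sketch of it is reconstructed below from the render_ring definition at the bottom of this patch and from the fields the new code dereferences; only the names come from the patch, while the types, field order, and nested-struct layout are assumptions.

/* Sketch only: reconstructed from the render_ring initializer in this
 * patch; the real declaration lives in intel_ringbuffer.h (not shown). */
struct intel_ring_buffer {
	const char	*name;
	struct ring_regs {
		u32	ctl;
		u32	head;
		u32	tail;
		u32	start;			/* PRB0_* for the render ring */
	} regs;
	unsigned int	ring_flag;		/* I915_EXEC_RENDER */
	unsigned long	size;			/* 32 * PAGE_SIZE for render */
	unsigned int	alignment;
	void		*virtual_start;
	struct drm_device	*dev;
	struct drm_gem_object	*gem_object;

	unsigned int	head;
	unsigned int	tail;
	int		space;
	u32		next_seqno;
	int		user_irq_refcount;	/* signedness assumed */
	u32		irq_gem_seqno;
	u32		waiting_gem_seqno;

	/* status page bookkeeping; field order here is an assumption */
	struct {
		struct drm_gem_object	*obj;
		u32			gfx_addr;
		void			*page_addr;
	} status_page;

	struct drm_local_map	map;		/* type assumed */
	struct list_head	active_list;
	struct list_head	request_list;

	/* per-ring callbacks, wired up by the render_ring definition below */
	void		(*setup_status_page)(struct drm_device *dev,
				struct intel_ring_buffer *ring);
	int		(*init)(struct drm_device *dev,
				struct intel_ring_buffer *ring);
	unsigned int	(*get_head)(struct drm_device *dev,
				struct intel_ring_buffer *ring);
	unsigned int	(*get_tail)(struct drm_device *dev,
				struct intel_ring_buffer *ring);
	unsigned int	(*get_active_head)(struct drm_device *dev,
				struct intel_ring_buffer *ring);
	void		(*advance_ring)(struct drm_device *dev,
				struct intel_ring_buffer *ring);
	void		(*flush)(struct drm_device *dev,
				struct intel_ring_buffer *ring,
				u32 invalidate_domains,
				u32 flush_domains);
	u32		(*add_request)(struct drm_device *dev,
				struct intel_ring_buffer *ring,
				struct drm_file *file_priv,
				u32 flush_domains);
	u32		(*get_gem_seqno)(struct drm_device *dev,
				struct intel_ring_buffer *ring);
	void		(*user_irq_get)(struct drm_device *dev,
				struct intel_ring_buffer *ring);
	void		(*user_irq_put)(struct drm_device *dev,
				struct intel_ring_buffer *ring);
	int		(*dispatch_gem_execbuffer)(struct drm_device *dev,
				struct intel_ring_buffer *ring,
				struct drm_i915_gem_execbuffer2 *exec,
				struct drm_clip_rect *cliprects,
				uint64_t exec_offset);
};

The effect of the callback table is that intel_init_ring_buffer(), intel_ring_begin()/emit()/advance() and the wait/wrap helpers stay generic, while everything PRB0-specific is confined to the render_* callbacks, so additional rings can later plug in their own register block and callbacks without touching the shared code.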
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 06058ddb4eed..5715c4d8cce9 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -29,30 +29,24 @@
 
 #include "drmP.h"
 #include "drm.h"
-#include "i915_drm.h"
 #include "i915_drv.h"
+#include "i915_drm.h"
 #include "i915_trace.h"
-#include "intel_drv.h"
 
-void
-i915_gem_flush(struct drm_device *dev,
-	       uint32_t invalidate_domains,
-	       uint32_t flush_domains)
+static void
+render_ring_flush(struct drm_device *dev,
+		struct intel_ring_buffer *ring,
+		u32 invalidate_domains,
+		u32 flush_domains)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t cmd;
-	RING_LOCALS;
-
 #if WATCH_EXEC
 	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
 		  invalidate_domains, flush_domains);
 #endif
-	trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
+	u32 cmd;
+	trace_i915_gem_request_flush(dev, ring->next_seqno,
 				     invalidate_domains, flush_domains);
 
-	if (flush_domains & I915_GEM_DOMAIN_CPU)
-		drm_agp_chipset_flush(dev);
-
 	if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
 		/*
 		 * read/write caches:
@@ -100,19 +94,130 @@ i915_gem_flush(struct drm_device *dev,
 #if WATCH_EXEC
 	DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
 #endif
-	BEGIN_LP_RING(2);
-	OUT_RING(cmd);
-	OUT_RING(MI_NOOP);
-	ADVANCE_LP_RING();
+	intel_ring_begin(dev, ring, 8);
+	intel_ring_emit(dev, ring, cmd);
+	intel_ring_emit(dev, ring, MI_NOOP);
+	intel_ring_advance(dev, ring);
 	}
+}
+
+static unsigned int render_ring_get_head(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	return I915_READ(PRB0_HEAD) & HEAD_ADDR;
+}
 
+static unsigned int render_ring_get_tail(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	return I915_READ(PRB0_TAIL) & TAIL_ADDR;
 }
+
+static unsigned int render_ring_get_active_head(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
+
+	return I915_READ(acthd_reg);
+}
+
+static void render_ring_advance_ring(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	I915_WRITE(PRB0_TAIL, ring->tail);
+}
+
+static int init_ring_common(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	u32 head;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv;
+	obj_priv = to_intel_bo(ring->gem_object);
+
+	/* Stop the ring if it's running. */
+	I915_WRITE(ring->regs.ctl, 0);
+	I915_WRITE(ring->regs.head, 0);
+	I915_WRITE(ring->regs.tail, 0);
+
+	/* Initialize the ring. */
+	I915_WRITE(ring->regs.start, obj_priv->gtt_offset);
+	head = ring->get_head(dev, ring);
+
+	/* G45 ring initialization fails to reset head to zero */
+	if (head != 0) {
+		DRM_ERROR("%s head not reset to zero "
+				"ctl %08x head %08x tail %08x start %08x\n",
+				ring->name,
+				I915_READ(ring->regs.ctl),
+				I915_READ(ring->regs.head),
+				I915_READ(ring->regs.tail),
+				I915_READ(ring->regs.start));
+
+		I915_WRITE(ring->regs.head, 0);
+
+		DRM_ERROR("%s head forced to zero "
+				"ctl %08x head %08x tail %08x start %08x\n",
+				ring->name,
+				I915_READ(ring->regs.ctl),
+				I915_READ(ring->regs.head),
+				I915_READ(ring->regs.tail),
+				I915_READ(ring->regs.start));
+	}
+
+	I915_WRITE(ring->regs.ctl,
+			((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
+			| RING_NO_REPORT | RING_VALID);
+
+	head = I915_READ(ring->regs.head) & HEAD_ADDR;
+	/* If the head is still not zero, the ring is dead */
+	if (head != 0) {
+		DRM_ERROR("%s initialization failed "
+				"ctl %08x head %08x tail %08x start %08x\n",
+				ring->name,
+				I915_READ(ring->regs.ctl),
+				I915_READ(ring->regs.head),
+				I915_READ(ring->regs.tail),
+				I915_READ(ring->regs.start));
+		return -EIO;
+	}
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		i915_kernel_lost_context(dev);
+	else {
+		ring->head = ring->get_head(dev, ring);
+		ring->tail = ring->get_tail(dev, ring);
+		ring->space = ring->head - (ring->tail + 8);
+		if (ring->space < 0)
+			ring->space += ring->size;
+	}
+	return 0;
+}
+
+static int init_render_ring(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret = init_ring_common(dev, ring);
+	if (IS_I9XX(dev) && !IS_GEN3(dev)) {
+		I915_WRITE(MI_MODE,
+				(VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
+	}
+	return ret;
+}
+
 #define PIPE_CONTROL_FLUSH(addr)					\
+do {									\
 	OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |		\
 		 PIPE_CONTROL_DEPTH_STALL);				\
 	OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT);			\
 	OUT_RING(0);							\
 	OUT_RING(0);							\
+} while (0)
 
 /**
  * Creates a new sequence number, emitting a write of it to the status page
@@ -122,21 +227,15 @@ i915_gem_flush(struct drm_device *dev,
  *
  * Returned sequence numbers are nonzero on success.
  */
-uint32_t
-i915_ring_add_request(struct drm_device *dev)
+static u32
+render_ring_add_request(struct drm_device *dev,
+		struct intel_ring_buffer *ring,
+		struct drm_file *file_priv,
+		u32 flush_domains)
 {
+	u32 seqno;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t seqno;
-	RING_LOCALS;
-
-	/* Grab the seqno we're going to make this request be, and bump the
-	 * next (skipping 0 so it can be the reserved no-seqno value).
-	 */
-	seqno = dev_priv->mm.next_gem_seqno;
-	dev_priv->mm.next_gem_seqno++;
-	if (dev_priv->mm.next_gem_seqno == 0)
-		dev_priv->mm.next_gem_seqno++;
-
+	seqno = intel_ring_get_seqno(dev, ring);
 	if (HAS_PIPE_CONTROL(dev)) {
 		u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
 
@@ -181,13 +280,26 @@ i915_ring_add_request(struct drm_device *dev)
 	return seqno;
 }
 
-void i915_user_irq_get(struct drm_device *dev)
+static u32
+render_ring_get_gem_seqno(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	if (HAS_PIPE_CONTROL(dev))
+		return ((volatile u32 *)(dev_priv->seqno_page))[0];
+	else
+		return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static void
+render_ring_get_user_irq(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
 
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
+	if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
 		if (HAS_PCH_SPLIT(dev))
 			ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
 		else
@@ -196,14 +308,16 @@ void i915_user_irq_get(struct drm_device *dev)
 	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 }
 
-void i915_user_irq_put(struct drm_device *dev)
+static void
+render_ring_put_user_irq(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
 
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
-	if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
+	BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
+	if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
 		if (HAS_PCH_SPLIT(dev))
 			ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
 		else
@@ -212,20 +326,31 @@ void i915_user_irq_put(struct drm_device *dev)
 	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 }
 
-/** Dispatch a batchbuffer to the ring
- */
-int
-i915_dispatch_gem_execbuffer(struct drm_device *dev,
-			      struct drm_i915_gem_execbuffer2 *exec,
-			      struct drm_clip_rect *cliprects,
-			      uint64_t exec_offset)
+static void render_setup_status_page(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	if (IS_GEN6(dev)) {
+		I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
+		I915_READ(HWS_PGA_GEN6); /* posting read */
+	} else {
+		I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
+		I915_READ(HWS_PGA); /* posting read */
+	}
+
+}
+
+static int
+render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+		struct intel_ring_buffer *ring,
+		struct drm_i915_gem_execbuffer2 *exec,
+		struct drm_clip_rect *cliprects,
+		uint64_t exec_offset)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int nbox = exec->num_cliprects;
 	int i = 0, count;
 	uint32_t exec_start, exec_len;
-	RING_LOCALS;
-
 	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
 	exec_len = (uint32_t) exec->batch_len;
 
@@ -242,74 +367,61 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
 		}
 
 		if (IS_I830(dev) || IS_845G(dev)) {
-			BEGIN_LP_RING(4);
-			OUT_RING(MI_BATCH_BUFFER);
-			OUT_RING(exec_start | MI_BATCH_NON_SECURE);
-			OUT_RING(exec_start + exec_len - 4);
-			OUT_RING(0);
-			ADVANCE_LP_RING();
+			intel_ring_begin(dev, ring, 4);
+			intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
+			intel_ring_emit(dev, ring,
+					exec_start | MI_BATCH_NON_SECURE);
+			intel_ring_emit(dev, ring, exec_start + exec_len - 4);
+			intel_ring_emit(dev, ring, 0);
 		} else {
-			BEGIN_LP_RING(2);
+			intel_ring_begin(dev, ring, 4);
 			if (IS_I965G(dev)) {
-				OUT_RING(MI_BATCH_BUFFER_START |
-					 (2 << 6) |
-					 MI_BATCH_NON_SECURE_I965);
-				OUT_RING(exec_start);
+				intel_ring_emit(dev, ring,
+						MI_BATCH_BUFFER_START | (2 << 6)
+						| MI_BATCH_NON_SECURE_I965);
+				intel_ring_emit(dev, ring, exec_start);
 			} else {
-				OUT_RING(MI_BATCH_BUFFER_START |
-					 (2 << 6));
-				OUT_RING(exec_start | MI_BATCH_NON_SECURE);
+				intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
+						| (2 << 6));
+				intel_ring_emit(dev, ring, exec_start |
+						MI_BATCH_NON_SECURE);
 			}
-			ADVANCE_LP_RING();
 		}
+		intel_ring_advance(dev, ring);
 	}
 
 	/* XXX breadcrumb */
 	return 0;
 }
 
-static void
-i915_gem_cleanup_hws(struct drm_device *dev)
+static void cleanup_status_page(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
 
-	if (dev_priv->hws_obj == NULL)
+	obj = ring->status_page.obj;
+	if (obj == NULL)
 		return;
-
-	obj = dev_priv->hws_obj;
 	obj_priv = to_intel_bo(obj);
 
 	kunmap(obj_priv->pages[0]);
 	i915_gem_object_unpin(obj);
 	drm_gem_object_unreference(obj);
-	dev_priv->hws_obj = NULL;
+	ring->status_page.obj = NULL;
 
 	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-	dev_priv->hw_status_page = NULL;
-
-	if (HAS_PIPE_CONTROL(dev))
-		i915_gem_cleanup_pipe_control(dev);
-
-	/* Write high address into HWS_PGA when disabling. */
-	I915_WRITE(HWS_PGA, 0x1ffff000);
 }
 
-static int
-i915_gem_init_hws(struct drm_device *dev)
+static int init_status_page(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
 	int ret;
 
-	/* If we need a physical address for the status page, it's already
-	 * initialized at driver load time.
-	 */
-	if (!I915_NEED_GFX_HWS(dev))
-		return 0;
-
 	obj = i915_gem_alloc_object(dev, 4096);
 	if (obj == NULL) {
 		DRM_ERROR("Failed to allocate status page\n");
@@ -321,36 +433,21 @@ i915_gem_init_hws(struct drm_device *dev)
 
 	ret = i915_gem_object_pin(obj, 4096);
 	if (ret != 0) {
-		drm_gem_object_unreference(obj);
 		goto err_unref;
 	}
 
-	dev_priv->status_gfx_addr = obj_priv->gtt_offset;
-
-	dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
-	if (dev_priv->hw_status_page == NULL) {
-		DRM_ERROR("Failed to map status page.\n");
+	ring->status_page.gfx_addr = obj_priv->gtt_offset;
+	ring->status_page.page_addr = kmap(obj_priv->pages[0]);
+	if (ring->status_page.page_addr == NULL) {
 		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-		ret = -EINVAL;
 		goto err_unpin;
 	}
+	ring->status_page.obj = obj;
+	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
-	if (HAS_PIPE_CONTROL(dev)) {
-		ret = i915_gem_init_pipe_control(dev);
-		if (ret)
-			goto err_unpin;
-	}
-
-	dev_priv->hws_obj = obj;
-	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-	if (IS_GEN6(dev)) {
-		I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr);
-		I915_READ(HWS_PGA_GEN6); /* posting read */
-	} else {
-		I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
-		I915_READ(HWS_PGA); /* posting read */
-	}
-	DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
+	ring->setup_status_page(dev, ring);
+	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
+			ring->name, ring->status_page.gfx_addr);
 
 	return 0;
 
@@ -359,43 +456,42 @@ err_unpin:
 err_unref:
 	drm_gem_object_unreference(obj);
 err:
-	return 0;
+	return ret;
 }
 
-int
-i915_gem_init_ringbuffer(struct drm_device *dev)
+
+int intel_init_ring_buffer(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
-	drm_i915_ring_buffer_t *ring = &dev_priv->render_ring;
 	int ret;
-	u32 head;
+	struct drm_i915_gem_object *obj_priv;
+	struct drm_gem_object *obj;
+	ring->dev = dev;
 
-	ret = i915_gem_init_hws(dev);
-	if (ret != 0)
-		return ret;
+	if (I915_NEED_GFX_HWS(dev)) {
+		ret = init_status_page(dev, ring);
+		if (ret)
+			return ret;
+	}
 
-	obj = i915_gem_alloc_object(dev, 128 * 1024);
+	obj = i915_gem_alloc_object(dev, ring->size);
 	if (obj == NULL) {
 		DRM_ERROR("Failed to allocate ringbuffer\n");
-		i915_gem_cleanup_hws(dev);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto cleanup;
 	}
-	obj_priv = to_intel_bo(obj);
 
-	ret = i915_gem_object_pin(obj, 4096);
+	ring->gem_object = obj;
+
+	ret = i915_gem_object_pin(obj, ring->alignment);
 	if (ret != 0) {
 		drm_gem_object_unreference(obj);
-		i915_gem_cleanup_hws(dev);
-		return ret;
+		goto cleanup;
 	}
 
-	/* Set up the kernel mapping for the ring. */
-	ring->Size = obj->size;
-
+	obj_priv = to_intel_bo(obj);
+	ring->map.size = ring->size;
 	ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
-	ring->map.size = obj->size;
 	ring->map.type = 0;
 	ring->map.flags = 0;
 	ring->map.mtrr = 0;
@@ -403,143 +499,85 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 	drm_core_ioremap_wc(&ring->map, dev);
 	if (ring->map.handle == NULL) {
 		DRM_ERROR("Failed to map ringbuffer.\n");
-		memset(&dev_priv->render_ring, 0, sizeof(dev_priv->render_ring));
 		i915_gem_object_unpin(obj);
 		drm_gem_object_unreference(obj);
-		i915_gem_cleanup_hws(dev);
-		return -EINVAL;
-	}
-	ring->ring_obj = obj;
-	ring->virtual_start = ring->map.handle;
-
-	/* Stop the ring if it's running. */
-	I915_WRITE(PRB0_CTL, 0);
-	I915_WRITE(PRB0_TAIL, 0);
-	I915_WRITE(PRB0_HEAD, 0);
-
-	/* Initialize the ring. */
-	I915_WRITE(PRB0_START, obj_priv->gtt_offset);
-	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-
-	/* G45 ring initialization fails to reset head to zero */
-	if (head != 0) {
-		DRM_ERROR("Ring head not reset to zero "
-			  "ctl %08x head %08x tail %08x start %08x\n",
-			  I915_READ(PRB0_CTL),
-			  I915_READ(PRB0_HEAD),
-			  I915_READ(PRB0_TAIL),
-			  I915_READ(PRB0_START));
-		I915_WRITE(PRB0_HEAD, 0);
-
-		DRM_ERROR("Ring head forced to zero "
-			  "ctl %08x head %08x tail %08x start %08x\n",
-			  I915_READ(PRB0_CTL),
-			  I915_READ(PRB0_HEAD),
-			  I915_READ(PRB0_TAIL),
-			  I915_READ(PRB0_START));
+		ret = -EINVAL;
+		goto cleanup;
 	}
 
-	I915_WRITE(PRB0_CTL,
-		   ((obj->size - 4096) & RING_NR_PAGES) |
-		   RING_NO_REPORT |
-		   RING_VALID);
-
-	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-
-	/* If the head is still not zero, the ring is dead */
-	if (head != 0) {
-		DRM_ERROR("Ring initialization failed "
-			  "ctl %08x head %08x tail %08x start %08x\n",
-			  I915_READ(PRB0_CTL),
-			  I915_READ(PRB0_HEAD),
-			  I915_READ(PRB0_TAIL),
-			  I915_READ(PRB0_START));
-		return -EIO;
+	ring->virtual_start = ring->map.handle;
+	ret = ring->init(dev, ring);
+	if (ret != 0) {
+		intel_cleanup_ring_buffer(dev, ring);
+		return ret;
 	}
 
-	/* Update our cache of the ring state */
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		i915_kernel_lost_context(dev);
 	else {
-		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-		ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+		ring->head = ring->get_head(dev, ring);
+		ring->tail = ring->get_tail(dev, ring);
 		ring->space = ring->head - (ring->tail + 8);
 		if (ring->space < 0)
-			ring->space += ring->Size;
+			ring->space += ring->size;
 	}
-
-	if (IS_I9XX(dev) && !IS_GEN3(dev)) {
-		I915_WRITE(MI_MODE,
-			   (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
-	}
-
-	return 0;
+	INIT_LIST_HEAD(&ring->active_list);
+	INIT_LIST_HEAD(&ring->request_list);
+	return ret;
+cleanup:
+	cleanup_status_page(dev, ring);
+	return ret;
 }
 
-void
-i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+void intel_cleanup_ring_buffer(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	if (dev_priv->render_ring.ring_obj == NULL)
+	if (ring->gem_object == NULL)
 		return;
 
-	drm_core_ioremapfree(&dev_priv->render_ring.map, dev);
-
-	i915_gem_object_unpin(dev_priv->render_ring.ring_obj);
-	drm_gem_object_unreference(dev_priv->render_ring.ring_obj);
-	dev_priv->render_ring.ring_obj = NULL;
-	memset(&dev_priv->render_ring, 0, sizeof(dev_priv->render_ring));
+	drm_core_ioremapfree(&ring->map, dev);
 
-	i915_gem_cleanup_hws(dev);
+	i915_gem_object_unpin(ring->gem_object);
+	drm_gem_object_unreference(ring->gem_object);
+	ring->gem_object = NULL;
+	cleanup_status_page(dev, ring);
 }
 
-/* As a ringbuffer is only allowed to wrap between instructions, fill
- * the tail with NOOPs.
- */
-int i915_wrap_ring(struct drm_device *dev)
+int intel_wrap_ring_buffer(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	volatile unsigned int *virt;
+	unsigned int *virt;
 	int rem;
+	rem = ring->size - ring->tail;
 
-	rem = dev_priv->render_ring.Size - dev_priv->render_ring.tail;
-	if (dev_priv->render_ring.space < rem) {
-		int ret = i915_wait_ring(dev, rem, __func__);
+	if (ring->space < rem) {
+		int ret = intel_wait_ring_buffer(dev, ring, rem);
 		if (ret)
 			return ret;
 	}
-	dev_priv->render_ring.space -= rem;
 
-	virt = (unsigned int *)
-		(dev_priv->render_ring.virtual_start + dev_priv->render_ring.tail);
+	virt = (unsigned int *)(ring->virtual_start + ring->tail);
 	rem /= 4;
 	while (rem--)
 		*virt++ = MI_NOOP;
 
-	dev_priv->render_ring.tail = 0;
+	ring->tail = 0;
 
 	return 0;
 }
 
-int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
+int intel_wait_ring_buffer(struct drm_device *dev,
+		struct intel_ring_buffer *ring, int n)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	drm_i915_ring_buffer_t *ring = &(dev_priv->render_ring);
-	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
-	u32 last_acthd = I915_READ(acthd_reg);
-	u32 acthd;
-	u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-	int i;
+	unsigned long end;
 
 	trace_i915_ring_wait_begin (dev);
-
-	for (i = 0; i < 100000; i++) {
-		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-		acthd = I915_READ(acthd_reg);
+	end = jiffies + 3 * HZ;
+	do {
+		ring->head = ring->get_head(dev, ring);
 		ring->space = ring->head - (ring->tail + 8);
 		if (ring->space < 0)
-			ring->space += ring->Size;
+			ring->space += ring->size;
 		if (ring->space >= n) {
 			trace_i915_ring_wait_end (dev);
 			return 0;
@@ -550,19 +588,97 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
 		if (master_priv->sarea_priv)
 			master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 		}
+		yield();
+	} while (!time_after(jiffies, end));
+	trace_i915_ring_wait_end (dev);
+	return -EBUSY;
+}
 
+void intel_ring_begin(struct drm_device *dev,
+		struct intel_ring_buffer *ring, int n)
+{
+	if (unlikely(ring->tail + n > ring->size))
+		intel_wrap_ring_buffer(dev, ring);
+	if (unlikely(ring->space < n))
+		intel_wait_ring_buffer(dev, ring, n);
+}
 
-		if (ring->head != last_head)
-			i = 0;
-		if (acthd != last_acthd)
-			i = 0;
+void intel_ring_emit(struct drm_device *dev,
+		struct intel_ring_buffer *ring, unsigned int data)
+{
+	unsigned int *virt = ring->virtual_start + ring->tail;
+	*virt = data;
+	ring->tail += 4;
+	ring->tail &= ring->size - 1;
+	ring->space -= 4;
+}
 
-		last_head = ring->head;
-		last_acthd = acthd;
-		msleep_interruptible(10);
+void intel_ring_advance(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	ring->advance_ring(dev, ring);
+}
 
-	}
+void intel_fill_struct(struct drm_device *dev,
+		struct intel_ring_buffer *ring,
+		void *data,
+		unsigned int len)
+{
+	unsigned int *virt = ring->virtual_start + ring->tail;
+	BUG_ON((len&~(4-1)) != 0);
+	intel_ring_begin(dev, ring, len);
+	memcpy(virt, data, len);
+	ring->tail += len;
+	ring->tail &= ring->size - 1;
+	ring->space -= len;
+	intel_ring_advance(dev, ring);
+}
 
-	trace_i915_ring_wait_end (dev);
-	return -EBUSY;
+u32 intel_ring_get_seqno(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
+{
+	u32 seqno;
+	seqno = ring->next_seqno;
+
+	/* reserve 0 for non-seqno */
+	if (++ring->next_seqno == 0)
+		ring->next_seqno = 1;
+	return seqno;
 }
+
+struct intel_ring_buffer render_ring = {
+	.name			= "render ring",
+	.regs			= {
+		.ctl = PRB0_CTL,
+		.head = PRB0_HEAD,
+		.tail = PRB0_TAIL,
+		.start = PRB0_START
+	},
+	.ring_flag		= I915_EXEC_RENDER,
+	.size			= 32 * PAGE_SIZE,
+	.alignment		= PAGE_SIZE,
+	.virtual_start		= NULL,
+	.dev			= NULL,
+	.gem_object		= NULL,
+	.head			= 0,
+	.tail			= 0,
+	.space			= 0,
+	.next_seqno		= 1,
+	.user_irq_refcount	= 0,
+	.irq_gem_seqno		= 0,
+	.waiting_gem_seqno	= 0,
+	.setup_status_page	= render_setup_status_page,
+	.init			= init_render_ring,
+	.get_head		= render_ring_get_head,
+	.get_tail		= render_ring_get_tail,
+	.get_active_head	= render_ring_get_active_head,
+	.advance_ring		= render_ring_advance_ring,
+	.flush			= render_ring_flush,
+	.add_request		= render_ring_add_request,
+	.get_gem_seqno		= render_ring_get_gem_seqno,
+	.user_irq_get		= render_ring_get_user_irq,
+	.user_irq_put		= render_ring_put_user_irq,
+	.dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
+	.status_page		= {NULL, 0, NULL},
+	.map			= {0,}
+};
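
For context, a minimal sketch of how a caller is expected to drive a ring through the new callbacks. The callback names and signatures are taken from this diff; the real call sites in i915_gem.c and i915_dma.c are not part of this file's diffstat, so the wrapper below is illustrative only.

/* Illustrative only: flush a ring's GPU domains and queue a request
 * behind the flush, going through the callbacks introduced by this
 * patch rather than the old BEGIN_LP_RING()/OUT_RING() helpers. */
static u32 example_flush_and_add_request(struct drm_device *dev,
		struct intel_ring_buffer *ring,
		u32 invalidate_domains, u32 flush_domains)
{
	ring->flush(dev, ring, invalidate_domains, flush_domains);
	/* file_priv is not needed for a kernel-internal request */
	return ring->add_request(dev, ring, NULL, flush_domains);
}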