Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')

-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 393
1 file changed, 204 insertions(+), 189 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index cb3508f78bc3..d89b88791aac 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -32,6 +32,7 @@
 #include "i915_drv.h"
 #include "i915_drm.h"
 #include "i915_trace.h"
+#include "intel_drv.h"
 
 static u32 i915_gem_get_seqno(struct drm_device *dev)
 {
@@ -49,9 +50,9 @@ static u32 i915_gem_get_seqno(struct drm_device *dev)
 
 static void
 render_ring_flush(struct drm_device *dev,
-                struct intel_ring_buffer *ring,
-                u32 invalidate_domains,
-                u32 flush_domains)
+                  struct intel_ring_buffer *ring,
+                  u32 invalidate_domains,
+                  u32 flush_domains)
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
         u32 cmd;
@@ -97,7 +98,7 @@ render_ring_flush(struct drm_device *dev,
                 if ((invalidate_domains|flush_domains) &
                     I915_GEM_DOMAIN_RENDER)
                         cmd &= ~MI_NO_WRITE_FLUSH;
-                if (!IS_I965G(dev)) {
+                if (INTEL_INFO(dev)->gen < 4) {
                         /*
                          * On the 965, the sampler cache always gets flushed
                          * and this bit is reserved.
@@ -118,38 +119,26 @@ render_ring_flush(struct drm_device *dev,
         }
 }
 
-static unsigned int render_ring_get_head(struct drm_device *dev,
-                struct intel_ring_buffer *ring)
-{
-        drm_i915_private_t *dev_priv = dev->dev_private;
-        return I915_READ(PRB0_HEAD) & HEAD_ADDR;
-}
-
-static unsigned int render_ring_get_tail(struct drm_device *dev,
-                struct intel_ring_buffer *ring)
+static void ring_set_tail(struct drm_device *dev,
+                          struct intel_ring_buffer *ring,
+                          u32 value)
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
-        return I915_READ(PRB0_TAIL) & TAIL_ADDR;
+        I915_WRITE_TAIL(ring, ring->tail);
 }
 
-static unsigned int render_ring_get_active_head(struct drm_device *dev,
-                struct intel_ring_buffer *ring)
+u32 intel_ring_get_active_head(struct drm_device *dev,
+                               struct intel_ring_buffer *ring)
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
-        u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
+        u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ?
+                        RING_ACTHD(ring->mmio_base) : ACTHD;
 
         return I915_READ(acthd_reg);
 }
 
-static void render_ring_advance_ring(struct drm_device *dev,
-                struct intel_ring_buffer *ring)
-{
-        drm_i915_private_t *dev_priv = dev->dev_private;
-        I915_WRITE(PRB0_TAIL, ring->tail);
-}
-
 static int init_ring_common(struct drm_device *dev,
                             struct intel_ring_buffer *ring)
 {
         u32 head;
         drm_i915_private_t *dev_priv = dev->dev_private;
@@ -157,57 +146,57 @@ static int init_ring_common(struct drm_device *dev,
         obj_priv = to_intel_bo(ring->gem_object);
 
         /* Stop the ring if it's running. */
-        I915_WRITE(ring->regs.ctl, 0);
-        I915_WRITE(ring->regs.head, 0);
-        I915_WRITE(ring->regs.tail, 0);
+        I915_WRITE_CTL(ring, 0);
+        I915_WRITE_HEAD(ring, 0);
+        ring->set_tail(dev, ring, 0);
 
         /* Initialize the ring. */
-        I915_WRITE(ring->regs.start, obj_priv->gtt_offset);
-        head = ring->get_head(dev, ring);
+        I915_WRITE_START(ring, obj_priv->gtt_offset);
+        head = I915_READ_HEAD(ring) & HEAD_ADDR;
 
         /* G45 ring initialization fails to reset head to zero */
         if (head != 0) {
                 DRM_ERROR("%s head not reset to zero "
                                 "ctl %08x head %08x tail %08x start %08x\n",
                                 ring->name,
-                                I915_READ(ring->regs.ctl),
-                                I915_READ(ring->regs.head),
-                                I915_READ(ring->regs.tail),
-                                I915_READ(ring->regs.start));
+                                I915_READ_CTL(ring),
+                                I915_READ_HEAD(ring),
+                                I915_READ_TAIL(ring),
+                                I915_READ_START(ring));
 
-                I915_WRITE(ring->regs.head, 0);
+                I915_WRITE_HEAD(ring, 0);
 
                 DRM_ERROR("%s head forced to zero "
                                 "ctl %08x head %08x tail %08x start %08x\n",
                                 ring->name,
-                                I915_READ(ring->regs.ctl),
-                                I915_READ(ring->regs.head),
-                                I915_READ(ring->regs.tail),
-                                I915_READ(ring->regs.start));
+                                I915_READ_CTL(ring),
+                                I915_READ_HEAD(ring),
+                                I915_READ_TAIL(ring),
+                                I915_READ_START(ring));
         }
 
-        I915_WRITE(ring->regs.ctl,
+        I915_WRITE_CTL(ring,
                         ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
                         | RING_NO_REPORT | RING_VALID);
 
-        head = I915_READ(ring->regs.head) & HEAD_ADDR;
+        head = I915_READ_HEAD(ring) & HEAD_ADDR;
         /* If the head is still not zero, the ring is dead */
         if (head != 0) {
                 DRM_ERROR("%s initialization failed "
                                 "ctl %08x head %08x tail %08x start %08x\n",
                                 ring->name,
-                                I915_READ(ring->regs.ctl),
-                                I915_READ(ring->regs.head),
-                                I915_READ(ring->regs.tail),
-                                I915_READ(ring->regs.start));
+                                I915_READ_CTL(ring),
+                                I915_READ_HEAD(ring),
+                                I915_READ_TAIL(ring),
+                                I915_READ_START(ring));
                 return -EIO;
         }
 
         if (!drm_core_check_feature(dev, DRIVER_MODESET))
                 i915_kernel_lost_context(dev);
         else {
-                ring->head = ring->get_head(dev, ring);
-                ring->tail = ring->get_tail(dev, ring);
+                ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+                ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
                 ring->space = ring->head - (ring->tail + 8);
                 if (ring->space < 0)
                         ring->space += ring->size;
@@ -216,13 +205,13 @@ static int init_ring_common(struct drm_device *dev,
 }
 
 static int init_render_ring(struct drm_device *dev,
-                struct intel_ring_buffer *ring)
+                            struct intel_ring_buffer *ring)
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
         int ret = init_ring_common(dev, ring);
         int mode;
 
-        if (IS_I9XX(dev) && !IS_GEN3(dev)) {
+        if (INTEL_INFO(dev)->gen > 3) {
                 mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
                 if (IS_GEN6(dev))
                         mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
@@ -250,9 +239,8 @@ do { \
  */
 static u32
 render_ring_add_request(struct drm_device *dev,
-                struct intel_ring_buffer *ring,
-                struct drm_file *file_priv,
-                u32 flush_domains)
+                        struct intel_ring_buffer *ring,
+                        u32 flush_domains)
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
         u32 seqno;
@@ -315,8 +303,8 @@ render_ring_add_request(struct drm_device *dev,
 }
 
 static u32
-render_ring_get_gem_seqno(struct drm_device *dev,
-                struct intel_ring_buffer *ring)
+render_ring_get_seqno(struct drm_device *dev,
+                      struct intel_ring_buffer *ring)
 {
         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
         if (HAS_PIPE_CONTROL(dev))
@@ -327,7 +315,7 @@ render_ring_get_gem_seqno(struct drm_device *dev,
 
 static void
 render_ring_get_user_irq(struct drm_device *dev,
-                struct intel_ring_buffer *ring)
+                         struct intel_ring_buffer *ring)
 {
         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
         unsigned long irqflags;
@@ -344,7 +332,7 @@ render_ring_get_user_irq(struct drm_device *dev,
 
 static void
 render_ring_put_user_irq(struct drm_device *dev,
-                struct intel_ring_buffer *ring)
+                         struct intel_ring_buffer *ring)
 {
         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
         unsigned long irqflags;
@@ -360,21 +348,23 @@ render_ring_put_user_irq(struct drm_device *dev,
         spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 }
 
-static void render_setup_status_page(struct drm_device *dev,
-                struct intel_ring_buffer *ring)
+void intel_ring_setup_status_page(struct drm_device *dev,
+                                  struct intel_ring_buffer *ring)
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
         if (IS_GEN6(dev)) {
-                I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
-                I915_READ(HWS_PGA_GEN6); /* posting read */
+                I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base),
+                           ring->status_page.gfx_addr);
+                I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */
         } else {
-                I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
-                I915_READ(HWS_PGA); /* posting read */
+                I915_WRITE(RING_HWS_PGA(ring->mmio_base),
+                           ring->status_page.gfx_addr);
+                I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */
         }
 
 }
 
-void
+static void
 bsd_ring_flush(struct drm_device *dev,
                 struct intel_ring_buffer *ring,
                 u32 invalidate_domains,
@@ -386,45 +376,16 @@ bsd_ring_flush(struct drm_device *dev,
         intel_ring_advance(dev, ring);
 }
 
-static inline unsigned int bsd_ring_get_head(struct drm_device *dev,
-                struct intel_ring_buffer *ring)
-{
-        drm_i915_private_t *dev_priv = dev->dev_private;
-        return I915_READ(BSD_RING_HEAD) & HEAD_ADDR;
-}
-
-static inline unsigned int bsd_ring_get_tail(struct drm_device *dev,
-                struct intel_ring_buffer *ring)
-{
-        drm_i915_private_t *dev_priv = dev->dev_private;
-        return I915_READ(BSD_RING_TAIL) & TAIL_ADDR;
-}
-
-static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev,
-                struct intel_ring_buffer *ring)
-{
-        drm_i915_private_t *dev_priv = dev->dev_private;
-        return I915_READ(BSD_RING_ACTHD);
-}
-
-static inline void bsd_ring_advance_ring(struct drm_device *dev,
-                struct intel_ring_buffer *ring)
-{
-        drm_i915_private_t *dev_priv = dev->dev_private;
-        I915_WRITE(BSD_RING_TAIL, ring->tail);
-}
-
 static int init_bsd_ring(struct drm_device *dev,
-                struct intel_ring_buffer *ring)
+                         struct intel_ring_buffer *ring)
 {
         return init_ring_common(dev, ring);
 }
 
 static u32
 bsd_ring_add_request(struct drm_device *dev,
-                struct intel_ring_buffer *ring,
-                struct drm_file *file_priv,
-                u32 flush_domains)
+                     struct intel_ring_buffer *ring,
+                     u32 flush_domains)
 {
         u32 seqno;
 
@@ -443,40 +404,32 @@ bsd_ring_add_request(struct drm_device *dev,
         return seqno;
 }
 
-static void bsd_setup_status_page(struct drm_device *dev,
-                struct intel_ring_buffer *ring)
-{
-        drm_i915_private_t *dev_priv = dev->dev_private;
-        I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
-        I915_READ(BSD_HWS_PGA);
-}
-
 static void
 bsd_ring_get_user_irq(struct drm_device *dev,
-                struct intel_ring_buffer *ring)
+                      struct intel_ring_buffer *ring)
 {
         /* do nothing */
 }
 static void
 bsd_ring_put_user_irq(struct drm_device *dev,
-                struct intel_ring_buffer *ring)
+                      struct intel_ring_buffer *ring)
 {
         /* do nothing */
 }
 
 static u32
-bsd_ring_get_gem_seqno(struct drm_device *dev,
-                struct intel_ring_buffer *ring)
+bsd_ring_get_seqno(struct drm_device *dev,
+                   struct intel_ring_buffer *ring)
 {
         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
 static int
 bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
-                struct intel_ring_buffer *ring,
-                struct drm_i915_gem_execbuffer2 *exec,
-                struct drm_clip_rect *cliprects,
-                uint64_t exec_offset)
+                                 struct intel_ring_buffer *ring,
+                                 struct drm_i915_gem_execbuffer2 *exec,
+                                 struct drm_clip_rect *cliprects,
+                                 uint64_t exec_offset)
 {
         uint32_t exec_start;
         exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
@@ -491,10 +444,10 @@ bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
 
 static int
 render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
-                struct intel_ring_buffer *ring,
-                struct drm_i915_gem_execbuffer2 *exec,
-                struct drm_clip_rect *cliprects,
-                uint64_t exec_offset)
+                                    struct intel_ring_buffer *ring,
+                                    struct drm_i915_gem_execbuffer2 *exec,
+                                    struct drm_clip_rect *cliprects,
+                                    uint64_t exec_offset)
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
         int nbox = exec->num_cliprects;
@@ -524,7 +477,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
                 intel_ring_emit(dev, ring, 0);
         } else {
                 intel_ring_begin(dev, ring, 4);
-                if (IS_I965G(dev)) {
+                if (INTEL_INFO(dev)->gen >= 4) {
                         intel_ring_emit(dev, ring,
                                         MI_BATCH_BUFFER_START | (2 << 6)
                                         | MI_BATCH_NON_SECURE_I965);
@@ -553,7 +506,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
 }
 
 static void cleanup_status_page(struct drm_device *dev,
-                struct intel_ring_buffer *ring)
+                                struct intel_ring_buffer *ring)
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
         struct drm_gem_object *obj;
@@ -573,7 +526,7 @@ static void cleanup_status_page(struct drm_device *dev,
 }
 
 static int init_status_page(struct drm_device *dev,
-                struct intel_ring_buffer *ring)
+                            struct intel_ring_buffer *ring)
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
         struct drm_gem_object *obj;
@@ -603,7 +556,7 @@ static int init_status_page(struct drm_device *dev,
         ring->status_page.obj = obj;
         memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
-        ring->setup_status_page(dev, ring);
+        intel_ring_setup_status_page(dev, ring);
         DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
                         ring->name, ring->status_page.gfx_addr);
 
@@ -617,15 +570,17 @@ err:
         return ret;
 }
 
-
 int intel_init_ring_buffer(struct drm_device *dev,
-                struct intel_ring_buffer *ring)
+                           struct intel_ring_buffer *ring)
 {
+        struct drm_i915_private *dev_priv = dev->dev_private;
         struct drm_i915_gem_object *obj_priv;
         struct drm_gem_object *obj;
         int ret;
 
         ring->dev = dev;
+        INIT_LIST_HEAD(&ring->active_list);
+        INIT_LIST_HEAD(&ring->request_list);
 
         if (I915_NEED_GFX_HWS(dev)) {
                 ret = init_status_page(dev, ring);
@@ -642,7 +597,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
 
         ring->gem_object = obj;
 
-        ret = i915_gem_object_pin(obj, ring->alignment);
+        ret = i915_gem_object_pin(obj, PAGE_SIZE);
         if (ret)
                 goto err_unref;
 
@@ -668,14 +623,12 @@ int intel_init_ring_buffer(struct drm_device *dev,
         if (!drm_core_check_feature(dev, DRIVER_MODESET))
                 i915_kernel_lost_context(dev);
         else {
-                ring->head = ring->get_head(dev, ring);
-                ring->tail = ring->get_tail(dev, ring);
+                ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+                ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
                 ring->space = ring->head - (ring->tail + 8);
                 if (ring->space < 0)
                         ring->space += ring->size;
         }
-        INIT_LIST_HEAD(&ring->active_list);
-        INIT_LIST_HEAD(&ring->request_list);
         return ret;
 
 err_unmap:
@@ -691,7 +644,7 @@ err_hws:
 }
 
 void intel_cleanup_ring_buffer(struct drm_device *dev,
-                struct intel_ring_buffer *ring)
+                               struct intel_ring_buffer *ring)
 {
         if (ring->gem_object == NULL)
                 return;
@@ -704,8 +657,8 @@ void intel_cleanup_ring_buffer(struct drm_device *dev,
         cleanup_status_page(dev, ring);
 }
 
-int intel_wrap_ring_buffer(struct drm_device *dev,
-                struct intel_ring_buffer *ring)
+static int intel_wrap_ring_buffer(struct drm_device *dev,
+                                  struct intel_ring_buffer *ring)
 {
         unsigned int *virt;
         int rem;
@@ -731,14 +684,15 @@ int intel_wrap_ring_buffer(struct drm_device *dev,
 }
 
 int intel_wait_ring_buffer(struct drm_device *dev,
-                struct intel_ring_buffer *ring, int n)
+                           struct intel_ring_buffer *ring, int n)
 {
         unsigned long end;
+        drm_i915_private_t *dev_priv = dev->dev_private;
 
         trace_i915_ring_wait_begin (dev);
         end = jiffies + 3 * HZ;
         do {
-                ring->head = ring->get_head(dev, ring);
+                ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
                 ring->space = ring->head - (ring->tail + 8);
                 if (ring->space < 0)
                         ring->space += ring->size;
@@ -760,7 +714,8 @@ int intel_wait_ring_buffer(struct drm_device *dev,
 }
 
 void intel_ring_begin(struct drm_device *dev,
-                struct intel_ring_buffer *ring, int num_dwords)
+                      struct intel_ring_buffer *ring,
+                      int num_dwords)
 {
         int n = 4*num_dwords;
         if (unlikely(ring->tail + n > ring->size))
@@ -772,16 +727,16 @@ void intel_ring_begin(struct drm_device *dev,
 }
 
 void intel_ring_advance(struct drm_device *dev,
-                struct intel_ring_buffer *ring)
+                        struct intel_ring_buffer *ring)
 {
         ring->tail &= ring->size - 1;
-        ring->advance_ring(dev, ring);
+        ring->set_tail(dev, ring, ring->tail);
 }
 
 void intel_fill_struct(struct drm_device *dev,
-                struct intel_ring_buffer *ring,
-                void *data,
-                unsigned int len)
+                       struct intel_ring_buffer *ring,
+                       void *data,
+                       unsigned int len)
 {
         unsigned int *virt = ring->virtual_start + ring->tail;
         BUG_ON((len&~(4-1)) != 0);
@@ -793,76 +748,136 @@ void intel_fill_struct(struct drm_device *dev,
         intel_ring_advance(dev, ring);
 }
 
-struct intel_ring_buffer render_ring = {
+static const struct intel_ring_buffer render_ring = {
         .name = "render ring",
-        .regs = {
-                .ctl = PRB0_CTL,
-                .head = PRB0_HEAD,
-                .tail = PRB0_TAIL,
-                .start = PRB0_START
-        },
-        .ring_flag = I915_EXEC_RENDER,
+        .id = RING_RENDER,
+        .mmio_base = RENDER_RING_BASE,
         .size = 32 * PAGE_SIZE,
-        .alignment = PAGE_SIZE,
-        .virtual_start = NULL,
-        .dev = NULL,
-        .gem_object = NULL,
-        .head = 0,
-        .tail = 0,
-        .space = 0,
-        .user_irq_refcount = 0,
-        .irq_gem_seqno = 0,
-        .waiting_gem_seqno = 0,
-        .setup_status_page = render_setup_status_page,
         .init = init_render_ring,
-        .get_head = render_ring_get_head,
-        .get_tail = render_ring_get_tail,
-        .get_active_head = render_ring_get_active_head,
-        .advance_ring = render_ring_advance_ring,
+        .set_tail = ring_set_tail,
         .flush = render_ring_flush,
         .add_request = render_ring_add_request,
-        .get_gem_seqno = render_ring_get_gem_seqno,
+        .get_seqno = render_ring_get_seqno,
         .user_irq_get = render_ring_get_user_irq,
         .user_irq_put = render_ring_put_user_irq,
         .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
-        .status_page = {NULL, 0, NULL},
-        .map = {0,}
 };
 
 /* ring buffer for bit-stream decoder */
 
-struct intel_ring_buffer bsd_ring = {
+static const struct intel_ring_buffer bsd_ring = {
         .name = "bsd ring",
-        .regs = {
-                .ctl = BSD_RING_CTL,
-                .head = BSD_RING_HEAD,
-                .tail = BSD_RING_TAIL,
-                .start = BSD_RING_START
-        },
-        .ring_flag = I915_EXEC_BSD,
+        .id = RING_BSD,
+        .mmio_base = BSD_RING_BASE,
         .size = 32 * PAGE_SIZE,
-        .alignment = PAGE_SIZE,
-        .virtual_start = NULL,
-        .dev = NULL,
-        .gem_object = NULL,
-        .head = 0,
-        .tail = 0,
-        .space = 0,
-        .user_irq_refcount = 0,
-        .irq_gem_seqno = 0,
-        .waiting_gem_seqno = 0,
-        .setup_status_page = bsd_setup_status_page,
         .init = init_bsd_ring,
-        .get_head = bsd_ring_get_head,
-        .get_tail = bsd_ring_get_tail,
-        .get_active_head = bsd_ring_get_active_head,
-        .advance_ring = bsd_ring_advance_ring,
+        .set_tail = ring_set_tail,
         .flush = bsd_ring_flush,
         .add_request = bsd_ring_add_request,
-        .get_gem_seqno = bsd_ring_get_gem_seqno,
+        .get_seqno = bsd_ring_get_seqno,
         .user_irq_get = bsd_ring_get_user_irq,
         .user_irq_put = bsd_ring_put_user_irq,
         .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
-        .status_page = {NULL, 0, NULL},
-        .map = {0,}
 };
+
+
+static void gen6_bsd_ring_set_tail(struct drm_device *dev,
+                                   struct intel_ring_buffer *ring,
+                                   u32 value)
+{
+        drm_i915_private_t *dev_priv = dev->dev_private;
+
+        /* Every tail move must follow the sequence below */
+        I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+                   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
+                   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
+        I915_WRITE(GEN6_BSD_RNCID, 0x0);
+
+        if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
+                      GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
+                     50))
+                DRM_ERROR("timed out waiting for IDLE Indicator\n");
+
+        I915_WRITE_TAIL(ring, value);
+        I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+                   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
+                   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
+}
+
+static void gen6_bsd_ring_flush(struct drm_device *dev,
+                                struct intel_ring_buffer *ring,
+                                u32 invalidate_domains,
+                                u32 flush_domains)
+{
+        intel_ring_begin(dev, ring, 4);
+        intel_ring_emit(dev, ring, MI_FLUSH_DW);
+        intel_ring_emit(dev, ring, 0);
+        intel_ring_emit(dev, ring, 0);
+        intel_ring_emit(dev, ring, 0);
+        intel_ring_advance(dev, ring);
+}
+
+static int
+gen6_bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+                                      struct intel_ring_buffer *ring,
+                                      struct drm_i915_gem_execbuffer2 *exec,
+                                      struct drm_clip_rect *cliprects,
+                                      uint64_t exec_offset)
+{
+        uint32_t exec_start;
+
+        exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+
+        intel_ring_begin(dev, ring, 2);
+        intel_ring_emit(dev, ring,
+                        MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+        /* bit0-7 is the length on GEN6+ */
+        intel_ring_emit(dev, ring, exec_start);
+        intel_ring_advance(dev, ring);
+
+        return 0;
+}
+
+/* ring buffer for Video Codec for Gen6+ */
+static const struct intel_ring_buffer gen6_bsd_ring = {
+        .name = "gen6 bsd ring",
+        .id = RING_BSD,
+        .mmio_base = GEN6_BSD_RING_BASE,
+        .size = 32 * PAGE_SIZE,
+        .init = init_bsd_ring,
+        .set_tail = gen6_bsd_ring_set_tail,
+        .flush = gen6_bsd_ring_flush,
+        .add_request = bsd_ring_add_request,
+        .get_seqno = bsd_ring_get_seqno,
+        .user_irq_get = bsd_ring_get_user_irq,
+        .user_irq_put = bsd_ring_put_user_irq,
+        .dispatch_gem_execbuffer = gen6_bsd_ring_dispatch_gem_execbuffer,
+};
+
+int intel_init_render_ring_buffer(struct drm_device *dev)
+{
+        drm_i915_private_t *dev_priv = dev->dev_private;
+
+        dev_priv->render_ring = render_ring;
+
+        if (!I915_NEED_GFX_HWS(dev)) {
+                dev_priv->render_ring.status_page.page_addr
+                        = dev_priv->status_page_dmah->vaddr;
+                memset(dev_priv->render_ring.status_page.page_addr,
+                       0, PAGE_SIZE);
+        }
+
+        return intel_init_ring_buffer(dev, &dev_priv->render_ring);
+}
+
+int intel_init_bsd_ring_buffer(struct drm_device *dev)
+{
+        drm_i915_private_t *dev_priv = dev->dev_private;
+
+        if (IS_GEN6(dev))
+                dev_priv->bsd_ring = gen6_bsd_ring;
+        else
+                dev_priv->bsd_ring = bsd_ring;
+
+        return intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+}
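
Note: the I915_READ_*/I915_WRITE_* ring accessors and the RING_* offset helpers that this patch leans on are defined outside this file (in intel_ringbuffer.h and i915_reg.h), so they do not appear in any hunk above. As a minimal sketch of the idea, assuming the conventional per-ring register layout at mmio_base (the exact offsets and macro spellings below are illustrative, not taken from this diff):

/* Sketch only: per-ring register access derived from ring->mmio_base.
 * Offsets 0x30/0x34/0x38/0x3c/0x74 are assumed for illustration; the
 * authoritative definitions live in i915_reg.h / intel_ringbuffer.h. */
#define RING_TAIL(base)         ((base) + 0x30)
#define RING_HEAD(base)         ((base) + 0x34)
#define RING_START(base)        ((base) + 0x38)
#define RING_CTL(base)          ((base) + 0x3c)
#define RING_ACTHD(base)        ((base) + 0x74)

#define I915_READ_TAIL(ring)       I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), (val))
#define I915_READ_HEAD(ring)       I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), (val))
#define I915_READ_CTL(ring)        I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val)  I915_WRITE(RING_CTL((ring)->mmio_base), (val))
#define I915_READ_START(ring)      I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), (val))

With every ring describing itself through mmio_base, one set of accessors replaces the per-ring get_head/get_tail/advance_ring hooks; only the tail write stays virtual (set_tail), because Gen6's BSD ring must wrap it in the PSMI idle handshake shown in gen6_bsd_ring_set_tail. The caller side is equally small. The ring-setup path in i915_gem.c is not part of this diff, so the following is a hypothetical sketch of how the two new entry points would be invoked at load time (function name invented for illustration):

/* Hypothetical call-path sketch: rings are now initialized through the
 * two new entry points instead of open-coded register setup. */
static int i915_gem_init_ringbuffer_sketch(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;

        ret = intel_init_render_ring_buffer(dev);
        if (ret)
                return ret;

        if (HAS_BSD(dev)) {
                /* picks gen6_bsd_ring or bsd_ring internally */
                ret = intel_init_bsd_ring_buffer(dev);
                if (ret)
                        goto cleanup_render;
        }
        return 0;

cleanup_render:
        intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
        return ret;
}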