Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
 -rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c | 564 +++++++++++++-----------
 1 file changed, 310 insertions(+), 254 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 26362f8495a8..09f2dc353ae2 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -32,19 +32,37 @@
 #include "i915_drv.h"
 #include "i915_drm.h"
 #include "i915_trace.h"
+#include "intel_drv.h"
+
+static u32 i915_gem_get_seqno(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 seqno;
+
+	seqno = dev_priv->next_seqno;
+
+	/* reserve 0 for non-seqno */
+	if (++dev_priv->next_seqno == 0)
+		dev_priv->next_seqno = 1;
+
+	return seqno;
+}
 
 static void
 render_ring_flush(struct drm_device *dev,
		  struct intel_ring_buffer *ring,
		  u32 invalidate_domains,
		  u32 flush_domains)
 {
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 cmd;
+
 #if WATCH_EXEC
 	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		  invalidate_domains, flush_domains);
 #endif
-	u32 cmd;
-	trace_i915_gem_request_flush(dev, ring->next_seqno,
+
+	trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
				     invalidate_domains, flush_domains);
 
 	if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
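The hunk above moves seqno allocation off the ring and onto the device, keeping 0 reserved as the "no seqno" sentinel across u32 wraparound. A minimal standalone sketch of that wrap rule (hypothetical test harness, not part of the patch):

#include <assert.h>
#include <stdint.h>

static uint32_t next_seqno = 0xffffffffu;	/* force an immediate wrap */

static uint32_t get_seqno(void)
{
	uint32_t seqno = next_seqno;

	/* reserve 0 for "no seqno", as in the hunk above */
	if (++next_seqno == 0)
		next_seqno = 1;
	return seqno;
}

int main(void)
{
	assert(get_seqno() == 0xffffffffu);
	assert(get_seqno() == 1);	/* 0 was skipped at the wrap */
	return 0;
}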
@@ -80,7 +98,7 @@ render_ring_flush(struct drm_device *dev,
		if ((invalidate_domains|flush_domains) &
		    I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
-		if (!IS_I965G(dev)) {
+		if (INTEL_INFO(dev)->gen < 4) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
@@ -101,38 +119,26 @@ render_ring_flush(struct drm_device *dev,
 	}
 }
 
-static unsigned int render_ring_get_head(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+static void ring_write_tail(struct drm_device *dev,
+			    struct intel_ring_buffer *ring,
+			    u32 value)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	return I915_READ(PRB0_HEAD) & HEAD_ADDR;
+	I915_WRITE_TAIL(ring, value);
 }
 
-static unsigned int render_ring_get_tail(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+u32 intel_ring_get_active_head(struct drm_device *dev,
+			       struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	return I915_READ(PRB0_TAIL) & TAIL_ADDR;
-}
-
-static unsigned int render_ring_get_active_head(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
+	u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ?
+			RING_ACTHD(ring->mmio_base) : ACTHD;
 
 	return I915_READ(acthd_reg);
 }
 
-static void render_ring_advance_ring(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	I915_WRITE(PRB0_TAIL, ring->tail);
-}
-
 static int init_ring_common(struct drm_device *dev,
		struct intel_ring_buffer *ring)
 {
 	u32 head;
 	drm_i915_private_t *dev_priv = dev->dev_private;
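With the per-ring get_head/get_tail/get_active_head hooks gone, each ring carries an mmio_base and the head/tail/ctl/start accessors become base-relative macros. Roughly how such helpers are shaped (register offsets here are assumptions for illustration; the real definitions live in i915_reg.h and intel_ringbuffer.h):

/* Sketch of base-relative ring registers (offsets are assumptions). */
#define RING_TAIL(base)		((base) + 0x30)
#define RING_HEAD(base)		((base) + 0x34)
#define RING_START(base)	((base) + 0x38)
#define RING_CTL(base)		((base) + 0x3c)
#define RING_ACTHD(base)	((base) + 0x74)

#define I915_READ_TAIL(ring)	 I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, v) I915_WRITE(RING_TAIL((ring)->mmio_base), (v))

One mmio_base per ring replaces a whole struct of per-ring register names, which is what lets render, BSD, and (later in this diff) blitter rings share the same init and wait code.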
@@ -140,57 +146,57 @@ static int init_ring_common(struct drm_device *dev,
 	obj_priv = to_intel_bo(ring->gem_object);
 
 	/* Stop the ring if it's running. */
-	I915_WRITE(ring->regs.ctl, 0);
-	I915_WRITE(ring->regs.head, 0);
-	I915_WRITE(ring->regs.tail, 0);
+	I915_WRITE_CTL(ring, 0);
+	I915_WRITE_HEAD(ring, 0);
+	ring->write_tail(dev, ring, 0);
 
 	/* Initialize the ring. */
-	I915_WRITE(ring->regs.start, obj_priv->gtt_offset);
-	head = ring->get_head(dev, ring);
+	I915_WRITE_START(ring, obj_priv->gtt_offset);
+	head = I915_READ_HEAD(ring) & HEAD_ADDR;
 
 	/* G45 ring initialization fails to reset head to zero */
 	if (head != 0) {
		DRM_ERROR("%s head not reset to zero "
				"ctl %08x head %08x tail %08x start %08x\n",
				ring->name,
-				I915_READ(ring->regs.ctl),
-				I915_READ(ring->regs.head),
-				I915_READ(ring->regs.tail),
-				I915_READ(ring->regs.start));
+				I915_READ_CTL(ring),
+				I915_READ_HEAD(ring),
+				I915_READ_TAIL(ring),
+				I915_READ_START(ring));
 
-		I915_WRITE(ring->regs.head, 0);
+		I915_WRITE_HEAD(ring, 0);
 
		DRM_ERROR("%s head forced to zero "
				"ctl %08x head %08x tail %08x start %08x\n",
				ring->name,
-				I915_READ(ring->regs.ctl),
-				I915_READ(ring->regs.head),
-				I915_READ(ring->regs.tail),
-				I915_READ(ring->regs.start));
+				I915_READ_CTL(ring),
+				I915_READ_HEAD(ring),
+				I915_READ_TAIL(ring),
+				I915_READ_START(ring));
 	}
 
-	I915_WRITE(ring->regs.ctl,
+	I915_WRITE_CTL(ring,
			((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_NO_REPORT | RING_VALID);
 
-	head = I915_READ(ring->regs.head) & HEAD_ADDR;
+	head = I915_READ_HEAD(ring) & HEAD_ADDR;
 	/* If the head is still not zero, the ring is dead */
 	if (head != 0) {
		DRM_ERROR("%s initialization failed "
				"ctl %08x head %08x tail %08x start %08x\n",
				ring->name,
-				I915_READ(ring->regs.ctl),
-				I915_READ(ring->regs.head),
-				I915_READ(ring->regs.tail),
-				I915_READ(ring->regs.start));
+				I915_READ_CTL(ring),
+				I915_READ_HEAD(ring),
+				I915_READ_TAIL(ring),
+				I915_READ_START(ring));
		return -EIO;
 	}
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kernel_lost_context(dev);
 	else {
-		ring->head = ring->get_head(dev, ring);
-		ring->tail = ring->get_tail(dev, ring);
+		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
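init_ring_common derives free space as head - (tail + 8), wrapping modulo the ring size; the 8 bytes of slack keep tail from ever catching head exactly. The same arithmetic as a minimal, self-contained model (a sketch, not driver code):

#include <assert.h>

/* Free space in the ring, as init_ring_common computes it. */
static int ring_space(int head, int tail, int size)
{
	int space = head - (tail + 8);

	if (space < 0)
		space += size;
	return space;
}

int main(void)
{
	assert(ring_space(0, 0, 4096) == 4088);	/* empty ring, 8 bytes slack */
	assert(ring_space(64, 32, 4096) == 24);		/* head just ahead of tail */
	assert(ring_space(32, 64, 4096) == 4056);	/* tail wrapped past head */
	return 0;
}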
@@ -199,13 +205,17 @@ static int init_ring_common(struct drm_device *dev,
 }
 
 static int init_render_ring(struct drm_device *dev,
		struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret = init_ring_common(dev, ring);
-	if (IS_I9XX(dev) && !IS_GEN3(dev)) {
-		I915_WRITE(MI_MODE,
-				(VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
+	int mode;
+
+	if (INTEL_INFO(dev)->gen > 3) {
+		mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
+		if (IS_GEN6(dev))
+			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
+		I915_WRITE(MI_MODE, mode);
 	}
 	return ret;
 }
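The MI_MODE write relies on the hardware's masked-register convention: the upper 16 bits select which of the lower 16 bits take effect, hence each flag or'ed in as bit << 16 | bit. Later i915 code wraps this in helpers along these lines (macro names from later kernels, shown here as a sketch, not part of this patch):

/* High half of the dword is a write-enable mask for the low half. */
#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a)	((a) << 16)

/*
 * With these, the Gen6 branch of init_render_ring above is equivalent to:
 *
 *	I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH) |
 *			    _MASKED_BIT_ENABLE(MI_FLUSH_ENABLE));
 */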
@@ -229,13 +239,13 @@ do { \
  */
 static u32
 render_ring_add_request(struct drm_device *dev,
		struct intel_ring_buffer *ring,
-		struct drm_file *file_priv,
-		u32 flush_domains)
+		u32 flush_domains)
 {
-	u32 seqno;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	seqno = intel_ring_get_seqno(dev, ring);
+	u32 seqno;
+
+	seqno = i915_gem_get_seqno(dev);
 
 	if (IS_GEN6(dev)) {
		BEGIN_LP_RING(6);
@@ -293,8 +303,8 @@ render_ring_add_request,
 }
 
 static u32
-render_ring_get_gem_seqno(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+render_ring_get_seqno(struct drm_device *dev,
+		      struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	if (HAS_PIPE_CONTROL(dev))
@@ -305,7 +315,7 @@ render_ring_get_gem_seqno,
 
 static void
 render_ring_get_user_irq(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+			 struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
@@ -322,7 +332,7 @@ render_ring_get_user_irq,
 
 static void
 render_ring_put_user_irq(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+			 struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
@@ -338,21 +348,23 @@ render_ring_put_user_irq,
 	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 }
 
-static void render_setup_status_page(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+void intel_ring_setup_status_page(struct drm_device *dev,
+				  struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	if (IS_GEN6(dev)) {
-		I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
-		I915_READ(HWS_PGA_GEN6); /* posting read */
+		I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base),
+			   ring->status_page.gfx_addr);
+		I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */
 	} else {
-		I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
-		I915_READ(HWS_PGA); /* posting read */
+		I915_WRITE(RING_HWS_PGA(ring->mmio_base),
+			   ring->status_page.gfx_addr);
+		I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */
 	}
 
 }
 
-void
+static void
 bsd_ring_flush(struct drm_device *dev,
		struct intel_ring_buffer *ring,
		u32 invalidate_domains,
@@ -364,48 +376,21 @@ bsd_ring_flush(struct drm_device *dev,
 	intel_ring_advance(dev, ring);
 }
 
-static inline unsigned int bsd_ring_get_head(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	return I915_READ(BSD_RING_HEAD) & HEAD_ADDR;
-}
-
-static inline unsigned int bsd_ring_get_tail(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	return I915_READ(BSD_RING_TAIL) & TAIL_ADDR;
-}
-
-static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	return I915_READ(BSD_RING_ACTHD);
-}
-
-static inline void bsd_ring_advance_ring(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	I915_WRITE(BSD_RING_TAIL, ring->tail);
-}
-
 static int init_bsd_ring(struct drm_device *dev,
		struct intel_ring_buffer *ring)
 {
 	return init_ring_common(dev, ring);
 }
 
 static u32
-bsd_ring_add_request(struct drm_device *dev,
-		struct intel_ring_buffer *ring,
-		struct drm_file *file_priv,
-		u32 flush_domains)
+ring_add_request(struct drm_device *dev,
+		 struct intel_ring_buffer *ring,
+		 u32 flush_domains)
 {
 	u32 seqno;
-	seqno = intel_ring_get_seqno(dev, ring);
+
+	seqno = i915_gem_get_seqno(dev);
+
 	intel_ring_begin(dev, ring, 4);
 	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(dev, ring,
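ring_add_request stores the new seqno into the hardware status page at I915_GEM_HWS_INDEX, which is what ring_status_page_get_seqno (below) reads back without touching GPU registers. The read side is approximately as follows (a sketch of the intel_ringbuffer.h helper; the exact definition in the tree may differ):

#define I915_GEM_HWS_INDEX	0x20	/* dword slot used for seqnos */

static inline u32
intel_read_status_page(struct intel_ring_buffer *ring, int reg)
{
	volatile u32 *hws = (volatile u32 *)ring->status_page.page_addr;

	return hws[reg];	/* CPU read of a page the GPU writes */
}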
@@ -419,40 +404,32 @@ bsd_ring_add_request(struct drm_device *dev,
 	return seqno;
 }
 
-static void bsd_setup_status_page(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
-	I915_READ(BSD_HWS_PGA);
-}
-
 static void
 bsd_ring_get_user_irq(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+		      struct intel_ring_buffer *ring)
 {
 	/* do nothing */
 }
 static void
 bsd_ring_put_user_irq(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+		      struct intel_ring_buffer *ring)
 {
 	/* do nothing */
 }
 
 static u32
-bsd_ring_get_gem_seqno(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+ring_status_page_get_seqno(struct drm_device *dev,
+			   struct intel_ring_buffer *ring)
 {
 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
 static int
-bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
-		struct intel_ring_buffer *ring,
-		struct drm_i915_gem_execbuffer2 *exec,
-		struct drm_clip_rect *cliprects,
-		uint64_t exec_offset)
+ring_dispatch_gem_execbuffer(struct drm_device *dev,
+			     struct intel_ring_buffer *ring,
+			     struct drm_i915_gem_execbuffer2 *exec,
+			     struct drm_clip_rect *cliprects,
+			     uint64_t exec_offset)
 {
 	uint32_t exec_start;
 	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
@@ -464,13 +441,12 @@ bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
 	return 0;
 }
 
-
 static int
 render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
-		struct intel_ring_buffer *ring,
-		struct drm_i915_gem_execbuffer2 *exec,
-		struct drm_clip_rect *cliprects,
-		uint64_t exec_offset)
+				    struct intel_ring_buffer *ring,
+				    struct drm_i915_gem_execbuffer2 *exec,
+				    struct drm_clip_rect *cliprects,
+				    uint64_t exec_offset)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int nbox = exec->num_cliprects;
@@ -479,7 +455,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
 	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
 	exec_len = (uint32_t) exec->batch_len;
 
-	trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
+	trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
 
 	count = nbox ? nbox : 1;
 
@@ -499,8 +475,8 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
			intel_ring_emit(dev, ring, exec_start + exec_len - 4);
			intel_ring_emit(dev, ring, 0);
		} else {
-			intel_ring_begin(dev, ring, 4);
-			if (IS_I965G(dev)) {
+			intel_ring_begin(dev, ring, 2);
+			if (INTEL_INFO(dev)->gen >= 4) {
				intel_ring_emit(dev, ring,
						MI_BATCH_BUFFER_START | (2 << 6)
						| MI_BATCH_NON_SECURE_I965);
@@ -515,12 +491,21 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
		intel_ring_advance(dev, ring);
 	}
 
+	if (IS_G4X(dev) || IS_GEN5(dev)) {
+		intel_ring_begin(dev, ring, 2);
+		intel_ring_emit(dev, ring, MI_FLUSH |
+				MI_NO_WRITE_FLUSH |
+				MI_INVALIDATE_ISP );
+		intel_ring_emit(dev, ring, MI_NOOP);
+		intel_ring_advance(dev, ring);
+	}
 	/* XXX breadcrumb */
+
 	return 0;
 }
 
 static void cleanup_status_page(struct drm_device *dev,
		struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_gem_object *obj;
@@ -540,7 +525,7 @@ static void cleanup_status_page(struct drm_device *dev,
 }
 
 static int init_status_page(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+			    struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_gem_object *obj;
@@ -570,7 +555,7 @@ static int init_status_page(struct drm_device *dev,
 	ring->status_page.obj = obj;
 	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
-	ring->setup_status_page(dev, ring);
+	intel_ring_setup_status_page(dev, ring);
 	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			ring->name, ring->status_page.gfx_addr);
 
@@ -584,14 +569,18 @@ err:
 	return ret;
 }
 
-
 int intel_init_ring_buffer(struct drm_device *dev,
		struct intel_ring_buffer *ring)
 {
-	int ret;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv;
 	struct drm_gem_object *obj;
+	int ret;
+
 	ring->dev = dev;
+	INIT_LIST_HEAD(&ring->active_list);
+	INIT_LIST_HEAD(&ring->request_list);
+	INIT_LIST_HEAD(&ring->gpu_write_list);
 
 	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(dev, ring);
@@ -603,16 +592,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
 	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
-		goto cleanup;
+		goto err_hws;
 	}
 
 	ring->gem_object = obj;
 
-	ret = i915_gem_object_pin(obj, ring->alignment);
-	if (ret != 0) {
-		drm_gem_object_unreference(obj);
-		goto cleanup;
-	}
+	ret = i915_gem_object_pin(obj, PAGE_SIZE);
+	if (ret)
+		goto err_unref;
 
 	obj_priv = to_intel_bo(obj);
 	ring->map.size = ring->size;
@@ -624,38 +611,40 @@ int intel_init_ring_buffer(struct drm_device *dev,
 	drm_core_ioremap_wc(&ring->map, dev);
 	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
-		i915_gem_object_unpin(obj);
-		drm_gem_object_unreference(obj);
		ret = -EINVAL;
-		goto cleanup;
+		goto err_unpin;
 	}
 
 	ring->virtual_start = ring->map.handle;
 	ret = ring->init(dev, ring);
-	if (ret != 0) {
-		intel_cleanup_ring_buffer(dev, ring);
-		return ret;
-	}
+	if (ret)
+		goto err_unmap;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kernel_lost_context(dev);
 	else {
-		ring->head = ring->get_head(dev, ring);
-		ring->tail = ring->get_tail(dev, ring);
+		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
 	}
-	INIT_LIST_HEAD(&ring->active_list);
-	INIT_LIST_HEAD(&ring->request_list);
 	return ret;
-cleanup:
+
+err_unmap:
+	drm_core_ioremapfree(&ring->map, dev);
+err_unpin:
+	i915_gem_object_unpin(obj);
+err_unref:
+	drm_gem_object_unreference(obj);
+	ring->gem_object = NULL;
+err_hws:
 	cleanup_status_page(dev, ring);
 	return ret;
 }
 
 void intel_cleanup_ring_buffer(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+			       struct intel_ring_buffer *ring)
 {
 	if (ring->gem_object == NULL)
		return;
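intel_init_ring_buffer now unwinds failures through a label ladder: each err_* label releases exactly what was acquired before the jump, in reverse order, replacing the scattered per-site cleanup the hunk removes. The skeleton of that pattern (acquire_*/release_* are hypothetical names, a sketch of the idiom rather than driver code):

/* Hypothetical resources; each acquire returns 0 on success. */
int acquire_a(void); int acquire_b(void); int acquire_c(void);
void release_a(void); void release_b(void);

int setup(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		return ret;

	ret = acquire_b();
	if (ret)
		goto err_a;

	ret = acquire_c();
	if (ret)
		goto err_b;

	return 0;

err_b:
	release_b();		/* undo in reverse order of acquisition */
err_a:
	release_a();
	return ret;
}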
@@ -668,8 +657,8 @@ void intel_cleanup_ring_buffer(struct drm_device *dev,
 	cleanup_status_page(dev, ring);
 }
 
-int intel_wrap_ring_buffer(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+static int intel_wrap_ring_buffer(struct drm_device *dev,
+				  struct intel_ring_buffer *ring)
 {
 	unsigned int *virt;
 	int rem;
@@ -682,9 +671,11 @@ int intel_wrap_ring_buffer(struct drm_device *dev,
 	}
 
 	virt = (unsigned int *)(ring->virtual_start + ring->tail);
-	rem /= 4;
-	while (rem--)
+	rem /= 8;
+	while (rem--) {
		*virt++ = MI_NOOP;
+		*virt++ = MI_NOOP;
+	}
 
 	ring->tail = 0;
 	ring->space = ring->head - 8;
@@ -693,14 +684,15 @@ int intel_wrap_ring_buffer(struct drm_device *dev,
 }
 
 int intel_wait_ring_buffer(struct drm_device *dev,
-		struct intel_ring_buffer *ring, int n)
+			   struct intel_ring_buffer *ring, int n)
 {
 	unsigned long end;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	trace_i915_ring_wait_begin (dev);
 	end = jiffies + 3 * HZ;
 	do {
-		ring->head = ring->get_head(dev, ring);
+		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
@@ -715,137 +707,201 @@ int intel_wait_ring_buffer(struct drm_device *dev,
			master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}
 
-		yield();
+		msleep(1);
 	} while (!time_after(jiffies, end));
 	trace_i915_ring_wait_end (dev);
 	return -EBUSY;
 }
 
 void intel_ring_begin(struct drm_device *dev,
-		struct intel_ring_buffer *ring, int num_dwords)
+		      struct intel_ring_buffer *ring,
+		      int num_dwords)
 {
 	int n = 4*num_dwords;
 	if (unlikely(ring->tail + n > ring->size))
		intel_wrap_ring_buffer(dev, ring);
 	if (unlikely(ring->space < n))
		intel_wait_ring_buffer(dev, ring, n);
-}
 
-void intel_ring_emit(struct drm_device *dev,
-		struct intel_ring_buffer *ring, unsigned int data)
-{
-	unsigned int *virt = ring->virtual_start + ring->tail;
-	*virt = data;
-	ring->tail += 4;
-	ring->tail &= ring->size - 1;
-	ring->space -= 4;
+	ring->space -= n;
 }
 
 void intel_ring_advance(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+			struct intel_ring_buffer *ring)
 {
-	ring->advance_ring(dev, ring);
-}
-
-void intel_fill_struct(struct drm_device *dev,
-		struct intel_ring_buffer *ring,
-		void *data,
-		unsigned int len)
-{
-	unsigned int *virt = ring->virtual_start + ring->tail;
-	BUG_ON((len&~(4-1)) != 0);
-	intel_ring_begin(dev, ring, len/4);
-	memcpy(virt, data, len);
-	ring->tail += len;
 	ring->tail &= ring->size - 1;
-	ring->space -= len;
-	intel_ring_advance(dev, ring);
-}
-
-u32 intel_ring_get_seqno(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
-{
-	u32 seqno;
-	seqno = ring->next_seqno;
-
-	/* reserve 0 for non-seqno */
-	if (++ring->next_seqno == 0)
-		ring->next_seqno = 1;
-	return seqno;
+	ring->write_tail(dev, ring, ring->tail);
 }
 
-struct intel_ring_buffer render_ring = {
+static const struct intel_ring_buffer render_ring = {
 	.name			= "render ring",
-	.regs			= {
-		.ctl = PRB0_CTL,
-		.head = PRB0_HEAD,
-		.tail = PRB0_TAIL,
-		.start = PRB0_START
-	},
-	.ring_flag		= I915_EXEC_RENDER,
+	.id			= RING_RENDER,
+	.mmio_base		= RENDER_RING_BASE,
 	.size			= 32 * PAGE_SIZE,
-	.alignment		= PAGE_SIZE,
-	.virtual_start		= NULL,
-	.dev			= NULL,
-	.gem_object		= NULL,
-	.head			= 0,
-	.tail			= 0,
-	.space			= 0,
-	.next_seqno		= 1,
-	.user_irq_refcount	= 0,
-	.irq_gem_seqno		= 0,
-	.waiting_gem_seqno	= 0,
-	.setup_status_page	= render_setup_status_page,
 	.init			= init_render_ring,
-	.get_head		= render_ring_get_head,
-	.get_tail		= render_ring_get_tail,
-	.get_active_head	= render_ring_get_active_head,
-	.advance_ring		= render_ring_advance_ring,
+	.write_tail		= ring_write_tail,
 	.flush			= render_ring_flush,
 	.add_request		= render_ring_add_request,
-	.get_gem_seqno		= render_ring_get_gem_seqno,
+	.get_seqno		= render_ring_get_seqno,
 	.user_irq_get		= render_ring_get_user_irq,
 	.user_irq_put		= render_ring_put_user_irq,
 	.dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
-	.status_page		= {NULL, 0, NULL},
-	.map			= {0,}
 };
 
 /* ring buffer for bit-stream decoder */
 
-struct intel_ring_buffer bsd_ring = {
+static const struct intel_ring_buffer bsd_ring = {
 	.name			= "bsd ring",
-	.regs			= {
-		.ctl = BSD_RING_CTL,
-		.head = BSD_RING_HEAD,
-		.tail = BSD_RING_TAIL,
-		.start = BSD_RING_START
-	},
-	.ring_flag		= I915_EXEC_BSD,
+	.id			= RING_BSD,
+	.mmio_base		= BSD_RING_BASE,
 	.size			= 32 * PAGE_SIZE,
-	.alignment		= PAGE_SIZE,
-	.virtual_start		= NULL,
-	.dev			= NULL,
-	.gem_object		= NULL,
-	.head			= 0,
-	.tail			= 0,
-	.space			= 0,
-	.next_seqno		= 1,
-	.user_irq_refcount	= 0,
-	.irq_gem_seqno		= 0,
-	.waiting_gem_seqno	= 0,
-	.setup_status_page	= bsd_setup_status_page,
 	.init			= init_bsd_ring,
-	.get_head		= bsd_ring_get_head,
-	.get_tail		= bsd_ring_get_tail,
-	.get_active_head	= bsd_ring_get_active_head,
-	.advance_ring		= bsd_ring_advance_ring,
+	.write_tail		= ring_write_tail,
 	.flush			= bsd_ring_flush,
-	.add_request		= bsd_ring_add_request,
-	.get_gem_seqno		= bsd_ring_get_gem_seqno,
+	.add_request		= ring_add_request,
+	.get_seqno		= ring_status_page_get_seqno,
 	.user_irq_get		= bsd_ring_get_user_irq,
 	.user_irq_put		= bsd_ring_put_user_irq,
-	.dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
-	.status_page		= {NULL, 0, NULL},
-	.map			= {0,}
+	.dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer,
 };
+
+
+static void gen6_bsd_ring_write_tail(struct drm_device *dev,
+				     struct intel_ring_buffer *ring,
+				     u32 value)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	/* Every tail move must follow the sequence below */
+	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
+		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
+	I915_WRITE(GEN6_BSD_RNCID, 0x0);
+
+	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
+		      GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
+		     50))
+		DRM_ERROR("timed out waiting for IDLE Indicator\n");
+
+	I915_WRITE_TAIL(ring, value);
+	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
+		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
+}
+
+static void gen6_ring_flush(struct drm_device *dev,
+			    struct intel_ring_buffer *ring,
+			    u32 invalidate_domains,
+			    u32 flush_domains)
+{
+	intel_ring_begin(dev, ring, 4);
+	intel_ring_emit(dev, ring, MI_FLUSH_DW);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_advance(dev, ring);
+}
+
+static int
+gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+				  struct intel_ring_buffer *ring,
+				  struct drm_i915_gem_execbuffer2 *exec,
+				  struct drm_clip_rect *cliprects,
+				  uint64_t exec_offset)
+{
+	uint32_t exec_start;
+
+	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+
+	intel_ring_begin(dev, ring, 2);
+	intel_ring_emit(dev, ring,
+			MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+	/* bit0-7 is the length on GEN6+ */
+	intel_ring_emit(dev, ring, exec_start);
+	intel_ring_advance(dev, ring);
+
+	return 0;
+}
+
+/* ring buffer for Video Codec for Gen6+ */
+static const struct intel_ring_buffer gen6_bsd_ring = {
+	.name			= "gen6 bsd ring",
+	.id			= RING_BSD,
+	.mmio_base		= GEN6_BSD_RING_BASE,
+	.size			= 32 * PAGE_SIZE,
+	.init			= init_bsd_ring,
+	.write_tail		= gen6_bsd_ring_write_tail,
+	.flush			= gen6_ring_flush,
+	.add_request		= ring_add_request,
+	.get_seqno		= ring_status_page_get_seqno,
+	.user_irq_get		= bsd_ring_get_user_irq,
+	.user_irq_put		= bsd_ring_put_user_irq,
+	.dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
+};
+
+/* Blitter support (SandyBridge+) */
+
+static void
+blt_ring_get_user_irq(struct drm_device *dev,
+		      struct intel_ring_buffer *ring)
+{
+	/* do nothing */
+}
+static void
+blt_ring_put_user_irq(struct drm_device *dev,
+		      struct intel_ring_buffer *ring)
+{
+	/* do nothing */
+}
+
+static const struct intel_ring_buffer gen6_blt_ring = {
+	.name			= "blt ring",
+	.id			= RING_BLT,
+	.mmio_base		= BLT_RING_BASE,
+	.size			= 32 * PAGE_SIZE,
+	.init			= init_ring_common,
+	.write_tail		= ring_write_tail,
+	.flush			= gen6_ring_flush,
+	.add_request		= ring_add_request,
+	.get_seqno		= ring_status_page_get_seqno,
+	.user_irq_get		= blt_ring_get_user_irq,
+	.user_irq_put		= blt_ring_put_user_irq,
+	.dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
+};
+
+int intel_init_render_ring_buffer(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	dev_priv->render_ring = render_ring;
+
+	if (!I915_NEED_GFX_HWS(dev)) {
+		dev_priv->render_ring.status_page.page_addr
+			= dev_priv->status_page_dmah->vaddr;
+		memset(dev_priv->render_ring.status_page.page_addr,
+		       0, PAGE_SIZE);
+	}
+
+	return intel_init_ring_buffer(dev, &dev_priv->render_ring);
+}
+
+int intel_init_bsd_ring_buffer(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (IS_GEN6(dev))
+		dev_priv->bsd_ring = gen6_bsd_ring;
+	else
+		dev_priv->bsd_ring = bsd_ring;
+
+	return intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+}
+
+int intel_init_blt_ring_buffer(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	dev_priv->blt_ring = gen6_blt_ring;
+
+	return intel_init_ring_buffer(dev, &dev_priv->blt_ring);
+}