author      Chris Wilson <chris@chris-wilson.co.uk>    2010-10-27 07:18:21 -0400
committer   Chris Wilson <chris@chris-wilson.co.uk>    2010-10-27 07:18:21 -0400
commit      78501eac34f372bfbeb4e1d9de688c13efa916f6 (patch)
tree        a490359ac69c394149362e6571a37189ee264739 /drivers/gpu/drm/i915/intel_ringbuffer.c
parent      dd2b379f071424f36f9f90ff83cb4ad058c7b6ed (diff)
drm/i915/ringbuffer: Drop the redundant dev from the vfunc interface
The ringbuffer keeps a pointer to the parent device, so we can use that
instead of passing around the pointer on the stack.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
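
To make the interface change concrete, here is a minimal before/after sketch distilled from the hunks below (illustrative only, not code added by the patch):

    /* Before: every ring vfunc took the drm_device explicitly. */
    static void render_ring_flush(struct drm_device *dev,
                                  struct intel_ring_buffer *ring,
                                  u32 invalidate_domains,
                                  u32 flush_domains);

    /* After: callers pass only the ring. Implementations that still
     * need the device take it from the ring's back-pointer. */
    static void render_ring_flush(struct intel_ring_buffer *ring,
                                  u32 invalidate_domains,
                                  u32 flush_domains)
    {
            struct drm_device *dev = ring->dev;   /* parent device */
            drm_i915_private_t *dev_priv = dev->dev_private;
            /* ... emit the flush via intel_ring_begin(ring, n) etc. ... */
    }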
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--    drivers/gpu/drm/i915/intel_ringbuffer.c    378
1 file changed, 176 insertions(+), 202 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 09f2dc353ae2..d6eba661105f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -49,11 +49,11 @@ static u32 i915_gem_get_seqno(struct drm_device *dev)
 }
 
 static void
-render_ring_flush(struct drm_device *dev,
-                  struct intel_ring_buffer *ring,
+render_ring_flush(struct intel_ring_buffer *ring,
                   u32 invalidate_domains,
                   u32 flush_domains)
 {
+        struct drm_device *dev = ring->dev;
         drm_i915_private_t *dev_priv = dev->dev_private;
         u32 cmd;
 
@@ -112,43 +112,39 @@ render_ring_flush(struct drm_device *dev,
 #if WATCH_EXEC
                 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
 #endif
-                intel_ring_begin(dev, ring, 2);
-                intel_ring_emit(dev, ring, cmd);
-                intel_ring_emit(dev, ring, MI_NOOP);
-                intel_ring_advance(dev, ring);
+                intel_ring_begin(ring, 2);
+                intel_ring_emit(ring, cmd);
+                intel_ring_emit(ring, MI_NOOP);
+                intel_ring_advance(ring);
         }
 }
 
-static void ring_write_tail(struct drm_device *dev,
-                            struct intel_ring_buffer *ring,
+static void ring_write_tail(struct intel_ring_buffer *ring,
                             u32 value)
 {
-        drm_i915_private_t *dev_priv = dev->dev_private;
+        drm_i915_private_t *dev_priv = ring->dev->dev_private;
         I915_WRITE_TAIL(ring, value);
 }
 
-u32 intel_ring_get_active_head(struct drm_device *dev,
-                               struct intel_ring_buffer *ring)
+u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
 {
-        drm_i915_private_t *dev_priv = dev->dev_private;
-        u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ?
+        drm_i915_private_t *dev_priv = ring->dev->dev_private;
+        u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
                         RING_ACTHD(ring->mmio_base) : ACTHD;
 
         return I915_READ(acthd_reg);
 }
 
-static int init_ring_common(struct drm_device *dev,
-                            struct intel_ring_buffer *ring)
+static int init_ring_common(struct intel_ring_buffer *ring)
 {
+        drm_i915_private_t *dev_priv = ring->dev->dev_private;
+        struct drm_i915_gem_object *obj_priv = to_intel_bo(ring->gem_object);
         u32 head;
-        drm_i915_private_t *dev_priv = dev->dev_private;
-        struct drm_i915_gem_object *obj_priv;
-        obj_priv = to_intel_bo(ring->gem_object);
 
         /* Stop the ring if it's running. */
         I915_WRITE_CTL(ring, 0);
         I915_WRITE_HEAD(ring, 0);
-        ring->write_tail(dev, ring, 0);
+        ring->write_tail(ring, 0);
 
         /* Initialize the ring. */
         I915_WRITE_START(ring, obj_priv->gtt_offset);
@@ -192,8 +188,8 @@ static int init_ring_common(struct drm_device *dev,
                 return -EIO;
         }
 
-        if (!drm_core_check_feature(dev, DRIVER_MODESET))
-                i915_kernel_lost_context(dev);
+        if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
+                i915_kernel_lost_context(ring->dev);
         else {
                 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
                 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
@@ -204,29 +200,29 @@ static int init_ring_common(struct drm_device *dev,
         return 0;
 }
 
-static int init_render_ring(struct drm_device *dev,
-                            struct intel_ring_buffer *ring)
+static int init_render_ring(struct intel_ring_buffer *ring)
 {
-        drm_i915_private_t *dev_priv = dev->dev_private;
-        int ret = init_ring_common(dev, ring);
-        int mode;
+        struct drm_device *dev = ring->dev;
+        int ret = init_ring_common(ring);
 
         if (INTEL_INFO(dev)->gen > 3) {
-                mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
+                drm_i915_private_t *dev_priv = dev->dev_private;
+                int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
                 if (IS_GEN6(dev))
                         mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
                 I915_WRITE(MI_MODE, mode);
         }
+
         return ret;
 }
 
-#define PIPE_CONTROL_FLUSH(addr)                                        \
+#define PIPE_CONTROL_FLUSH(ring__, addr__)                              \
 do {                                                                    \
-        OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |          \
+        intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |  \
                  PIPE_CONTROL_DEPTH_STALL | 2);                         \
-        OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT);                       \
-        OUT_RING(0);                                                    \
-        OUT_RING(0);                                                    \
+        intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);    \
+        intel_ring_emit(ring__, 0);                                     \
+        intel_ring_emit(ring__, 0);                                     \
 } while (0)
 
 /**
@@ -238,26 +234,26 @@ do {                                                                    \
  * Returned sequence numbers are nonzero on success.
  */
 static u32
-render_ring_add_request(struct drm_device *dev,
-                        struct intel_ring_buffer *ring,
+render_ring_add_request(struct intel_ring_buffer *ring,
                         u32 flush_domains)
 {
+        struct drm_device *dev = ring->dev;
         drm_i915_private_t *dev_priv = dev->dev_private;
         u32 seqno;
 
         seqno = i915_gem_get_seqno(dev);
 
         if (IS_GEN6(dev)) {
-                BEGIN_LP_RING(6);
-                OUT_RING(GFX_OP_PIPE_CONTROL | 3);
-                OUT_RING(PIPE_CONTROL_QW_WRITE |
+                intel_ring_begin(ring, 6);
+                intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | 3);
+                intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE |
                          PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
                          PIPE_CONTROL_NOTIFY);
-                OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
-                OUT_RING(seqno);
-                OUT_RING(0);
-                OUT_RING(0);
-                ADVANCE_LP_RING();
+                intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+                intel_ring_emit(ring, seqno);
+                intel_ring_emit(ring, 0);
+                intel_ring_emit(ring, 0);
+                intel_ring_advance(ring);
         } else if (HAS_PIPE_CONTROL(dev)) {
                 u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
 
@@ -266,46 +262,46 @@ render_ring_add_request(struct drm_device *dev,
                  * PIPE_NOTIFY buffers out to memory before requesting
                  * an interrupt.
                  */
-                BEGIN_LP_RING(32);
-                OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+                intel_ring_begin(ring, 32);
+                intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
                          PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
-                OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
-                OUT_RING(seqno);
-                OUT_RING(0);
-                PIPE_CONTROL_FLUSH(scratch_addr);
+                intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+                intel_ring_emit(ring, seqno);
+                intel_ring_emit(ring, 0);
+                PIPE_CONTROL_FLUSH(ring, scratch_addr);
                 scratch_addr += 128; /* write to separate cachelines */
-                PIPE_CONTROL_FLUSH(scratch_addr);
+                PIPE_CONTROL_FLUSH(ring, scratch_addr);
                 scratch_addr += 128;
-                PIPE_CONTROL_FLUSH(scratch_addr);
+                PIPE_CONTROL_FLUSH(ring, scratch_addr);
                 scratch_addr += 128;
-                PIPE_CONTROL_FLUSH(scratch_addr);
+                PIPE_CONTROL_FLUSH(ring, scratch_addr);
                 scratch_addr += 128;
-                PIPE_CONTROL_FLUSH(scratch_addr);
+                PIPE_CONTROL_FLUSH(ring, scratch_addr);
                 scratch_addr += 128;
-                PIPE_CONTROL_FLUSH(scratch_addr);
-                OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+                PIPE_CONTROL_FLUSH(ring, scratch_addr);
+                intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
                          PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
                          PIPE_CONTROL_NOTIFY);
-                OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
-                OUT_RING(seqno);
-                OUT_RING(0);
-                ADVANCE_LP_RING();
+                intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+                intel_ring_emit(ring, seqno);
+                intel_ring_emit(ring, 0);
+                intel_ring_advance(ring);
         } else {
-                BEGIN_LP_RING(4);
-                OUT_RING(MI_STORE_DWORD_INDEX);
-                OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-                OUT_RING(seqno);
+                intel_ring_begin(ring, 4);
+                intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+                intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+                intel_ring_emit(ring, seqno);
 
-                OUT_RING(MI_USER_INTERRUPT);
-                ADVANCE_LP_RING();
+                intel_ring_emit(ring, MI_USER_INTERRUPT);
+                intel_ring_advance(ring);
         }
         return seqno;
 }
 
 static u32
-render_ring_get_seqno(struct drm_device *dev,
-                      struct intel_ring_buffer *ring)
+render_ring_get_seqno(struct intel_ring_buffer *ring)
 {
+        struct drm_device *dev = ring->dev;
         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
         if (HAS_PIPE_CONTROL(dev))
                 return ((volatile u32 *)(dev_priv->seqno_page))[0];
@@ -314,9 +310,9 @@ render_ring_get_seqno(struct drm_device *dev,
 }
 
 static void
-render_ring_get_user_irq(struct drm_device *dev,
-                         struct intel_ring_buffer *ring)
+render_ring_get_user_irq(struct intel_ring_buffer *ring)
 {
+        struct drm_device *dev = ring->dev;
         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
         unsigned long irqflags;
 
@@ -331,9 +327,9 @@ render_ring_get_user_irq(struct drm_device *dev,
 }
 
 static void
-render_ring_put_user_irq(struct drm_device *dev,
-                         struct intel_ring_buffer *ring)
+render_ring_put_user_irq(struct intel_ring_buffer *ring)
 {
+        struct drm_device *dev = ring->dev;
         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
         unsigned long irqflags;
 
@@ -348,56 +344,41 @@ render_ring_put_user_irq(struct drm_device *dev,
         spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 }
 
-void intel_ring_setup_status_page(struct drm_device *dev,
-                                  struct intel_ring_buffer *ring)
+void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 {
-        drm_i915_private_t *dev_priv = dev->dev_private;
-        if (IS_GEN6(dev)) {
-                I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base),
-                           ring->status_page.gfx_addr);
-                I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */
-        } else {
-                I915_WRITE(RING_HWS_PGA(ring->mmio_base),
-                           ring->status_page.gfx_addr);
-                I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */
-        }
-
+        drm_i915_private_t *dev_priv = ring->dev->dev_private;
+        u32 mmio = IS_GEN6(ring->dev) ?
+                RING_HWS_PGA_GEN6(ring->mmio_base) :
+                RING_HWS_PGA(ring->mmio_base);
+        I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+        POSTING_READ(mmio);
 }
 
 static void
-bsd_ring_flush(struct drm_device *dev,
-               struct intel_ring_buffer *ring,
-               u32 invalidate_domains,
-               u32 flush_domains)
-{
-        intel_ring_begin(dev, ring, 2);
-        intel_ring_emit(dev, ring, MI_FLUSH);
-        intel_ring_emit(dev, ring, MI_NOOP);
-        intel_ring_advance(dev, ring);
-}
-
-static int init_bsd_ring(struct drm_device *dev,
-                         struct intel_ring_buffer *ring)
+bsd_ring_flush(struct intel_ring_buffer *ring,
+               u32 invalidate_domains,
+               u32 flush_domains)
 {
-        return init_ring_common(dev, ring);
+        intel_ring_begin(ring, 2);
+        intel_ring_emit(ring, MI_FLUSH);
+        intel_ring_emit(ring, MI_NOOP);
+        intel_ring_advance(ring);
 }
 
 static u32
-ring_add_request(struct drm_device *dev,
-                 struct intel_ring_buffer *ring,
+ring_add_request(struct intel_ring_buffer *ring,
                  u32 flush_domains)
 {
         u32 seqno;
 
-        seqno = i915_gem_get_seqno(dev);
+        seqno = i915_gem_get_seqno(ring->dev);
 
-        intel_ring_begin(dev, ring, 4);
-        intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
-        intel_ring_emit(dev, ring,
-                        I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-        intel_ring_emit(dev, ring, seqno);
-        intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
-        intel_ring_advance(dev, ring);
+        intel_ring_begin(ring, 4);
+        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+        intel_ring_emit(ring, seqno);
+        intel_ring_emit(ring, MI_USER_INTERRUPT);
+        intel_ring_advance(ring);
 
         DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
 
@@ -405,53 +386,55 @@ ring_add_request(struct drm_device *dev,
 }
 
 static void
-bsd_ring_get_user_irq(struct drm_device *dev,
-                      struct intel_ring_buffer *ring)
+bsd_ring_get_user_irq(struct intel_ring_buffer *ring)
 {
         /* do nothing */
 }
 static void
-bsd_ring_put_user_irq(struct drm_device *dev,
-                      struct intel_ring_buffer *ring)
+bsd_ring_put_user_irq(struct intel_ring_buffer *ring)
 {
         /* do nothing */
 }
 
 static u32
-ring_status_page_get_seqno(struct drm_device *dev,
-                           struct intel_ring_buffer *ring)
+ring_status_page_get_seqno(struct intel_ring_buffer *ring)
 {
         return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
 static int
-ring_dispatch_gem_execbuffer(struct drm_device *dev,
-                             struct intel_ring_buffer *ring,
-                             struct drm_i915_gem_execbuffer2 *exec,
-                             struct drm_clip_rect *cliprects,
-                             uint64_t exec_offset)
+ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+                         struct drm_i915_gem_execbuffer2 *exec,
+                         struct drm_clip_rect *cliprects,
+                         uint64_t exec_offset)
 {
         uint32_t exec_start;
+
         exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
-        intel_ring_begin(dev, ring, 2);
-        intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
-                        (2 << 6) | MI_BATCH_NON_SECURE_I965);
-        intel_ring_emit(dev, ring, exec_start);
-        intel_ring_advance(dev, ring);
+
+        intel_ring_begin(ring, 2);
+        intel_ring_emit(ring,
+                        MI_BATCH_BUFFER_START |
+                        (2 << 6) |
+                        MI_BATCH_NON_SECURE_I965);
+        intel_ring_emit(ring, exec_start);
+        intel_ring_advance(ring);
+
         return 0;
 }
 
 static int
-render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
-                                    struct intel_ring_buffer *ring,
-                                    struct drm_i915_gem_execbuffer2 *exec,
-                                    struct drm_clip_rect *cliprects,
-                                    uint64_t exec_offset)
+render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+                                struct drm_i915_gem_execbuffer2 *exec,
+                                struct drm_clip_rect *cliprects,
+                                uint64_t exec_offset)
 {
+        struct drm_device *dev = ring->dev;
         drm_i915_private_t *dev_priv = dev->dev_private;
         int nbox = exec->num_cliprects;
         int i = 0, count;
         uint32_t exec_start, exec_len;
+
         exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
         exec_len = (uint32_t) exec->batch_len;
 
@@ -468,46 +451,44 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
                 }
 
                 if (IS_I830(dev) || IS_845G(dev)) {
-                        intel_ring_begin(dev, ring, 4);
-                        intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
-                        intel_ring_emit(dev, ring,
-                                        exec_start | MI_BATCH_NON_SECURE);
-                        intel_ring_emit(dev, ring, exec_start + exec_len - 4);
-                        intel_ring_emit(dev, ring, 0);
+                        intel_ring_begin(ring, 4);
+                        intel_ring_emit(ring, MI_BATCH_BUFFER);
+                        intel_ring_emit(ring, exec_start | MI_BATCH_NON_SECURE);
+                        intel_ring_emit(ring, exec_start + exec_len - 4);
+                        intel_ring_emit(ring, 0);
                 } else {
-                        intel_ring_begin(dev, ring, 2);
+                        intel_ring_begin(ring, 2);
                         if (INTEL_INFO(dev)->gen >= 4) {
-                                intel_ring_emit(dev, ring,
+                                intel_ring_emit(ring,
                                                 MI_BATCH_BUFFER_START | (2 << 6)
                                                 | MI_BATCH_NON_SECURE_I965);
-                                intel_ring_emit(dev, ring, exec_start);
+                                intel_ring_emit(ring, exec_start);
                         } else {
-                                intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
+                                intel_ring_emit(ring, MI_BATCH_BUFFER_START
                                                 | (2 << 6));
-                                intel_ring_emit(dev, ring, exec_start |
+                                intel_ring_emit(ring, exec_start |
                                                 MI_BATCH_NON_SECURE);
                         }
                 }
-                intel_ring_advance(dev, ring);
+                intel_ring_advance(ring);
         }
 
         if (IS_G4X(dev) || IS_GEN5(dev)) {
-                intel_ring_begin(dev, ring, 2);
-                intel_ring_emit(dev, ring, MI_FLUSH |
+                intel_ring_begin(ring, 2);
+                intel_ring_emit(ring, MI_FLUSH |
                                 MI_NO_WRITE_FLUSH |
                                 MI_INVALIDATE_ISP );
-                intel_ring_emit(dev, ring, MI_NOOP);
-                intel_ring_advance(dev, ring);
+                intel_ring_emit(ring, MI_NOOP);
+                intel_ring_advance(ring);
         }
         /* XXX breadcrumb */
 
         return 0;
 }
 
-static void cleanup_status_page(struct drm_device *dev,
-                                struct intel_ring_buffer *ring)
+static void cleanup_status_page(struct intel_ring_buffer *ring)
 {
-        drm_i915_private_t *dev_priv = dev->dev_private;
+        drm_i915_private_t *dev_priv = ring->dev->dev_private;
         struct drm_gem_object *obj;
         struct drm_i915_gem_object *obj_priv;
 
@@ -524,9 +505,9 @@ static void cleanup_status_page(struct drm_device *dev,
         memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
 }
 
-static int init_status_page(struct drm_device *dev,
-                            struct intel_ring_buffer *ring)
+static int init_status_page(struct intel_ring_buffer *ring)
 {
+        struct drm_device *dev = ring->dev;
         drm_i915_private_t *dev_priv = dev->dev_private;
         struct drm_gem_object *obj;
         struct drm_i915_gem_object *obj_priv;
@@ -555,7 +536,7 @@ static int init_status_page(struct drm_device *dev,
         ring->status_page.obj = obj;
         memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
-        intel_ring_setup_status_page(dev, ring);
+        intel_ring_setup_status_page(ring);
         DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
                          ring->name, ring->status_page.gfx_addr);
 
@@ -583,7 +564,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
         INIT_LIST_HEAD(&ring->gpu_write_list);
 
         if (I915_NEED_GFX_HWS(dev)) {
-                ret = init_status_page(dev, ring);
+                ret = init_status_page(ring);
                 if (ret)
                         return ret;
         }
@@ -616,7 +597,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
         }
 
         ring->virtual_start = ring->map.handle;
-        ret = ring->init(dev, ring);
+        ret = ring->init(ring);
         if (ret)
                 goto err_unmap;
 
@@ -639,33 +620,32 @@ err_unref:
         drm_gem_object_unreference(obj);
         ring->gem_object = NULL;
 err_hws:
-        cleanup_status_page(dev, ring);
+        cleanup_status_page(ring);
         return ret;
 }
 
-void intel_cleanup_ring_buffer(struct drm_device *dev,
-                               struct intel_ring_buffer *ring)
+void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 {
         if (ring->gem_object == NULL)
                 return;
 
-        drm_core_ioremapfree(&ring->map, dev);
+        drm_core_ioremapfree(&ring->map, ring->dev);
 
         i915_gem_object_unpin(ring->gem_object);
         drm_gem_object_unreference(ring->gem_object);
         ring->gem_object = NULL;
-        cleanup_status_page(dev, ring);
+
+        cleanup_status_page(ring);
 }
 
-static int intel_wrap_ring_buffer(struct drm_device *dev,
-                                  struct intel_ring_buffer *ring)
+static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
 {
         unsigned int *virt;
         int rem;
         rem = ring->size - ring->tail;
 
         if (ring->space < rem) {
-                int ret = intel_wait_ring_buffer(dev, ring, rem);
+                int ret = intel_wait_ring_buffer(ring, rem);
                 if (ret)
                         return ret;
         }
@@ -683,11 +663,11 @@ static int intel_wrap_ring_buffer(struct drm_device *dev,
         return 0;
 }
 
-int intel_wait_ring_buffer(struct drm_device *dev,
-                           struct intel_ring_buffer *ring, int n)
+int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 {
-        unsigned long end;
+        struct drm_device *dev = ring->dev;
         drm_i915_private_t *dev_priv = dev->dev_private;
+        unsigned long end;
 
         trace_i915_ring_wait_begin (dev);
         end = jiffies + 3 * HZ;
@@ -697,7 +677,7 @@ int intel_wait_ring_buffer(struct drm_device *dev,
                 if (ring->space < 0)
                         ring->space += ring->size;
                 if (ring->space >= n) {
-                        trace_i915_ring_wait_end (dev);
+                        trace_i915_ring_wait_end(dev);
                         return 0;
                 }
 
@@ -713,24 +693,24 @@ int intel_wait_ring_buffer(struct drm_device *dev,
         return -EBUSY;
 }
 
-void intel_ring_begin(struct drm_device *dev,
-                      struct intel_ring_buffer *ring,
+void intel_ring_begin(struct intel_ring_buffer *ring,
                       int num_dwords)
 {
         int n = 4*num_dwords;
+
         if (unlikely(ring->tail + n > ring->size))
-                intel_wrap_ring_buffer(dev, ring);
+                intel_wrap_ring_buffer(ring);
+
         if (unlikely(ring->space < n))
-                intel_wait_ring_buffer(dev, ring, n);
+                intel_wait_ring_buffer(ring, n);
 
         ring->space -= n;
 }
 
-void intel_ring_advance(struct drm_device *dev,
-                        struct intel_ring_buffer *ring)
+void intel_ring_advance(struct intel_ring_buffer *ring)
 {
         ring->tail &= ring->size - 1;
-        ring->write_tail(dev, ring, ring->tail);
+        ring->write_tail(ring, ring->tail);
 }
 
 static const struct intel_ring_buffer render_ring = {
@@ -745,7 +725,7 @@ static const struct intel_ring_buffer render_ring = {
         .get_seqno = render_ring_get_seqno,
         .user_irq_get = render_ring_get_user_irq,
         .user_irq_put = render_ring_put_user_irq,
-        .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
+        .dispatch_execbuffer = render_ring_dispatch_execbuffer,
 };
 
 /* ring buffer for bit-stream decoder */
@@ -755,22 +735,21 @@ static const struct intel_ring_buffer bsd_ring = {
         .id = RING_BSD,
         .mmio_base = BSD_RING_BASE,
         .size = 32 * PAGE_SIZE,
-        .init = init_bsd_ring,
+        .init = init_ring_common,
         .write_tail = ring_write_tail,
         .flush = bsd_ring_flush,
         .add_request = ring_add_request,
         .get_seqno = ring_status_page_get_seqno,
         .user_irq_get = bsd_ring_get_user_irq,
         .user_irq_put = bsd_ring_put_user_irq,
-        .dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer,
+        .dispatch_execbuffer = ring_dispatch_execbuffer,
 };
 
 
-static void gen6_bsd_ring_write_tail(struct drm_device *dev,
-                                     struct intel_ring_buffer *ring,
+static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
                                      u32 value)
 {
-        drm_i915_private_t *dev_priv = dev->dev_private;
+        drm_i915_private_t *dev_priv = ring->dev->dev_private;
 
         /* Every tail move must follow the sequence below */
         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
@@ -789,36 +768,33 @@ static void gen6_bsd_ring_write_tail(struct drm_device *dev,
                    GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
 }
 
-static void gen6_ring_flush(struct drm_device *dev,
-                            struct intel_ring_buffer *ring,
+static void gen6_ring_flush(struct intel_ring_buffer *ring,
                             u32 invalidate_domains,
                             u32 flush_domains)
 {
-        intel_ring_begin(dev, ring, 4);
-        intel_ring_emit(dev, ring, MI_FLUSH_DW);
-        intel_ring_emit(dev, ring, 0);
-        intel_ring_emit(dev, ring, 0);
-        intel_ring_emit(dev, ring, 0);
-        intel_ring_advance(dev, ring);
+        intel_ring_begin(ring, 4);
+        intel_ring_emit(ring, MI_FLUSH_DW);
+        intel_ring_emit(ring, 0);
+        intel_ring_emit(ring, 0);
+        intel_ring_emit(ring, 0);
+        intel_ring_advance(ring);
 }
 
 static int
-gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev,
-                                  struct intel_ring_buffer *ring,
-                                  struct drm_i915_gem_execbuffer2 *exec,
-                                  struct drm_clip_rect *cliprects,
-                                  uint64_t exec_offset)
+gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+                              struct drm_i915_gem_execbuffer2 *exec,
+                              struct drm_clip_rect *cliprects,
+                              uint64_t exec_offset)
 {
         uint32_t exec_start;
 
         exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
 
-        intel_ring_begin(dev, ring, 2);
-        intel_ring_emit(dev, ring,
-                        MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+        intel_ring_begin(ring, 2);
+        intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
         /* bit0-7 is the length on GEN6+ */
-        intel_ring_emit(dev, ring, exec_start);
-        intel_ring_advance(dev, ring);
+        intel_ring_emit(ring, exec_start);
+        intel_ring_advance(ring);
 
         return 0;
 }
@@ -829,27 +805,25 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
         .id = RING_BSD,
         .mmio_base = GEN6_BSD_RING_BASE,
         .size = 32 * PAGE_SIZE,
-        .init = init_bsd_ring,
+        .init = init_ring_common,
         .write_tail = gen6_bsd_ring_write_tail,
         .flush = gen6_ring_flush,
         .add_request = ring_add_request,
         .get_seqno = ring_status_page_get_seqno,
         .user_irq_get = bsd_ring_get_user_irq,
         .user_irq_put = bsd_ring_put_user_irq,
-        .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
+        .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
 };
 
 /* Blitter support (SandyBridge+) */
 
 static void
-blt_ring_get_user_irq(struct drm_device *dev,
-                      struct intel_ring_buffer *ring)
+blt_ring_get_user_irq(struct intel_ring_buffer *ring)
 {
         /* do nothing */
 }
 static void
-blt_ring_put_user_irq(struct drm_device *dev,
-                      struct intel_ring_buffer *ring)
+blt_ring_put_user_irq(struct intel_ring_buffer *ring)
 {
         /* do nothing */
 }
@@ -866,7 +840,7 @@ static const struct intel_ring_buffer gen6_blt_ring = {
         .get_seqno = ring_status_page_get_seqno,
         .user_irq_get = blt_ring_get_user_irq,
         .user_irq_put = blt_ring_put_user_irq,
-        .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
+        .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
 };
 
 int intel_init_render_ring_buffer(struct drm_device *dev)