Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
 -rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 1127
 1 file changed, 708 insertions(+), 419 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 31cd7e33e820..f6b9baa6a63d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -48,14 +48,15 @@ static u32 i915_gem_get_seqno(struct drm_device *dev)
 	return seqno;
 }
 
-static void
-render_ring_flush(struct drm_device *dev,
-		  struct intel_ring_buffer *ring,
+static int
+render_ring_flush(struct intel_ring_buffer *ring,
 		  u32 invalidate_domains,
 		  u32 flush_domains)
 {
+	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 cmd;
+	int ret;
 
 #if WATCH_EXEC
 	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
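
The conversion visible in this first hunk is the pattern applied throughout the file: every emitter now takes only the intel_ring_buffer and must propagate the error returned by intel_ring_begin(). A minimal sketch of the resulting calling convention (illustrative only, emit_two_dwords is not part of the patch):

	static int emit_two_dwords(struct intel_ring_buffer *ring, u32 cmd)
	{
		int ret = intel_ring_begin(ring, 2);	/* reserve 2 dwords */
		if (ret)
			return ret;			/* e.g. -EBUSY if the ring never drains */

		intel_ring_emit(ring, cmd);
		intel_ring_emit(ring, MI_NOOP);		/* pad to an even dword count */
		intel_ring_advance(ring);		/* publish the new tail to the hw */
		return 0;
	}
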
@@ -109,49 +110,54 @@ render_ring_flush(struct drm_device *dev,
 	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
 		cmd |= MI_EXE_FLUSH;
 
+	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
+	    (IS_G4X(dev) || IS_GEN5(dev)))
+		cmd |= MI_INVALIDATE_ISP;
+
 #if WATCH_EXEC
 	DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
 #endif
-	intel_ring_begin(dev, ring, 2);
-	intel_ring_emit(dev, ring, cmd);
-	intel_ring_emit(dev, ring, MI_NOOP);
-	intel_ring_advance(dev, ring);
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 	}
+
+	return 0;
 }
 
-static void ring_write_tail(struct drm_device *dev,
-			    struct intel_ring_buffer *ring,
+static void ring_write_tail(struct intel_ring_buffer *ring,
 			    u32 value)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	I915_WRITE_TAIL(ring, value);
 }
 
-u32 intel_ring_get_active_head(struct drm_device *dev,
-			       struct intel_ring_buffer *ring)
+u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ?
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
 			RING_ACTHD(ring->mmio_base) : ACTHD;
 
 	return I915_READ(acthd_reg);
 }
 
-static int init_ring_common(struct drm_device *dev,
-			    struct intel_ring_buffer *ring)
+static int init_ring_common(struct intel_ring_buffer *ring)
 {
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	struct drm_i915_gem_object *obj = ring->obj;
 	u32 head;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
-	obj_priv = to_intel_bo(ring->gem_object);
 
 	/* Stop the ring if it's running. */
 	I915_WRITE_CTL(ring, 0);
 	I915_WRITE_HEAD(ring, 0);
-	ring->write_tail(dev, ring, 0);
+	ring->write_tail(ring, 0);
 
 	/* Initialize the ring. */
-	I915_WRITE_START(ring, obj_priv->gtt_offset);
+	I915_WRITE_START(ring, obj->gtt_offset);
 	head = I915_READ_HEAD(ring) & HEAD_ADDR;
 
 	/* G45 ring initialization fails to reset head to zero */
@@ -178,12 +184,13 @@ static int init_ring_common(struct drm_device *dev,
 	}
 
 	I915_WRITE_CTL(ring,
-			((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
+			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
 			| RING_REPORT_64K | RING_VALID);
 
-	head = I915_READ_HEAD(ring) & HEAD_ADDR;
 	/* If the head is still not zero, the ring is dead */
-	if (head != 0) {
+	if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
+	    I915_READ_START(ring) != obj->gtt_offset ||
+	    (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
 		DRM_ERROR("%s initialization failed "
 				"ctl %08x head %08x tail %08x start %08x\n",
 				ring->name,
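
The new failure test reads back all three registers instead of only HEAD. The predicate is easier to see pulled out as a helper; this is a hypothetical restatement for clarity, the patch open-codes it:

	static bool ring_came_up(struct intel_ring_buffer *ring,
				 struct drm_i915_gem_object *obj)
	{
		/* the I915_READ_* macros reference dev_priv implicitly */
		drm_i915_private_t *dev_priv = ring->dev->dev_private;

		return (I915_READ_CTL(ring) & RING_VALID) != 0 &&	/* VALID bit stuck */
		       I915_READ_START(ring) == obj->gtt_offset &&	/* START points at our object */
		       (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;		/* HEAD parked at zero */
	}
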
@@ -194,8 +201,8 @@ static int init_ring_common(struct drm_device *dev,
 		return -EIO;
 	}
 
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		i915_kernel_lost_context(dev);
+	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
+		i915_kernel_lost_context(ring->dev);
 	else {
 		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
 		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
@@ -203,335 +210,562 @@ static int init_ring_common(struct drm_device *dev,
 		if (ring->space < 0)
 			ring->space += ring->size;
 	}
+
 	return 0;
 }
 
-static int init_render_ring(struct drm_device *dev,
-			    struct intel_ring_buffer *ring)
+/*
+ * 965+ support PIPE_CONTROL commands, which provide finer grained control
+ * over cache flushing.
+ */
+struct pipe_control {
+	struct drm_i915_gem_object *obj;
+	volatile u32 *cpu_page;
+	u32 gtt_offset;
+};
+
+static int
+init_pipe_control(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret = init_ring_common(dev, ring);
-	int mode;
+	struct pipe_control *pc;
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	if (ring->private)
+		return 0;
+
+	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
+	if (!pc)
+		return -ENOMEM;
+
+	obj = i915_gem_alloc_object(ring->dev, 4096);
+	if (obj == NULL) {
+		DRM_ERROR("Failed to allocate seqno page\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+	obj->agp_type = AGP_USER_CACHED_MEMORY;
+
+	ret = i915_gem_object_pin(obj, 4096, true);
+	if (ret)
+		goto err_unref;
+
+	pc->gtt_offset = obj->gtt_offset;
+	pc->cpu_page = kmap(obj->pages[0]);
+	if (pc->cpu_page == NULL)
+		goto err_unpin;
+
+	pc->obj = obj;
+	ring->private = pc;
+	return 0;
+
+err_unpin:
+	i915_gem_object_unpin(obj);
+err_unref:
+	drm_gem_object_unreference(&obj->base);
+err:
+	kfree(pc);
+	return ret;
+}
+
+static void
+cleanup_pipe_control(struct intel_ring_buffer *ring)
+{
+	struct pipe_control *pc = ring->private;
+	struct drm_i915_gem_object *obj;
+
+	if (!ring->private)
+		return;
+
+	obj = pc->obj;
+	kunmap(obj->pages[0]);
+	i915_gem_object_unpin(obj);
+	drm_gem_object_unreference(&obj->base);
+
+	kfree(pc);
+	ring->private = NULL;
+}
+
+static int init_render_ring(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret = init_ring_common(ring);
 
 	if (INTEL_INFO(dev)->gen > 3) {
-		mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
+		int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
 		if (IS_GEN6(dev))
 			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
 		I915_WRITE(MI_MODE, mode);
 	}
+
+	if (INTEL_INFO(dev)->gen >= 6) {
+	} else if (IS_GEN5(dev)) {
+		ret = init_pipe_control(ring);
+		if (ret)
+			return ret;
+	}
+
 	return ret;
 }
 
-#define PIPE_CONTROL_FLUSH(addr) \
+static void render_ring_cleanup(struct intel_ring_buffer *ring)
+{
+	if (!ring->private)
+		return;
+
+	cleanup_pipe_control(ring);
+}
+
+static void
+update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int id;
+
+	/*
+	 * cs -> 1 = vcs, 0 = bcs
+	 * vcs -> 1 = bcs, 0 = cs,
+	 * bcs -> 1 = cs, 0 = vcs.
+	 */
+	id = ring - dev_priv->ring;
+	id += 2 - i;
+	id %= 3;
+
+	intel_ring_emit(ring,
+			MI_SEMAPHORE_MBOX |
+			MI_SEMAPHORE_REGISTER |
+			MI_SEMAPHORE_UPDATE);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring,
+			RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
+}
+
+static int
+gen6_add_request(struct intel_ring_buffer *ring,
+		 u32 *result)
+{
+	u32 seqno;
+	int ret;
+
+	ret = intel_ring_begin(ring, 10);
+	if (ret)
+		return ret;
+
+	seqno = i915_gem_get_seqno(ring->dev);
+	update_semaphore(ring, 0, seqno);
+	update_semaphore(ring, 1, seqno);
+
+	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	intel_ring_advance(ring);
+
+	*result = seqno;
+	return 0;
+}
+
+int
+intel_ring_sync(struct intel_ring_buffer *ring,
+		struct intel_ring_buffer *to,
+		u32 seqno)
+{
+	int ret;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring,
+			MI_SEMAPHORE_MBOX |
+			MI_SEMAPHORE_REGISTER |
+			intel_ring_sync_index(ring, to) << 17 |
+			MI_SEMAPHORE_COMPARE);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+#define PIPE_CONTROL_FLUSH(ring__, addr__) \
 do { \
-	OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
+	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
 		 PIPE_CONTROL_DEPTH_STALL | 2); \
-	OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
-	OUT_RING(0); \
-	OUT_RING(0); \
+	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
+	intel_ring_emit(ring__, 0); \
+	intel_ring_emit(ring__, 0); \
 } while (0)
 
-/**
- * Creates a new sequence number, emitting a write of it to the status page
- * plus an interrupt, which will trigger i915_user_interrupt_handler.
- *
- * Must be called with struct_lock held.
- *
- * Returned sequence numbers are nonzero on success.
- */
-static u32
-render_ring_add_request(struct drm_device *dev,
-			struct intel_ring_buffer *ring,
-			u32 flush_domains)
+static int
+pc_render_add_request(struct intel_ring_buffer *ring,
+		      u32 *result)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 seqno;
+	struct drm_device *dev = ring->dev;
+	u32 seqno = i915_gem_get_seqno(dev);
+	struct pipe_control *pc = ring->private;
+	u32 scratch_addr = pc->gtt_offset + 128;
+	int ret;
+
+	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
+	 * incoherent with writes to memory, i.e. completely fubar,
+	 * so we need to use PIPE_NOTIFY instead.
+	 *
+	 * However, we also need to workaround the qword write
+	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
+	 * memory before requesting an interrupt.
+	 */
+	ret = intel_ring_begin(ring, 32);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
+	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, 0);
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 128; /* write to separate cachelines */
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 128;
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 128;
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 128;
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 128;
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
+			PIPE_CONTROL_NOTIFY);
+	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+
+	*result = seqno;
+	return 0;
+}
 
-	seqno = i915_gem_get_seqno(dev);
-
-	if (IS_GEN6(dev)) {
-		BEGIN_LP_RING(6);
-		OUT_RING(GFX_OP_PIPE_CONTROL | 3);
-		OUT_RING(PIPE_CONTROL_QW_WRITE |
-			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
-			 PIPE_CONTROL_NOTIFY);
-		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
-		OUT_RING(seqno);
-		OUT_RING(0);
-		OUT_RING(0);
-		ADVANCE_LP_RING();
-	} else if (HAS_PIPE_CONTROL(dev)) {
-		u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
+static int
+render_ring_add_request(struct intel_ring_buffer *ring,
+			u32 *result)
+{
+	struct drm_device *dev = ring->dev;
+	u32 seqno = i915_gem_get_seqno(dev);
+	int ret;
 
-		/*
-		 * Workaround qword write incoherence by flushing the
-		 * PIPE_NOTIFY buffers out to memory before requesting
-		 * an interrupt.
-		 */
-		BEGIN_LP_RING(32);
-		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
-			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
-		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
-		OUT_RING(seqno);
-		OUT_RING(0);
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		scratch_addr += 128; /* write to separate cachelines */
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
-			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
-			 PIPE_CONTROL_NOTIFY);
-		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
-		OUT_RING(seqno);
-		OUT_RING(0);
-		ADVANCE_LP_RING();
-	} else {
-		BEGIN_LP_RING(4);
-		OUT_RING(MI_STORE_DWORD_INDEX);
-		OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-		OUT_RING(seqno);
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
 
-		OUT_RING(MI_USER_INTERRUPT);
-		ADVANCE_LP_RING();
-	}
-	return seqno;
+	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	intel_ring_advance(ring);
+
+	*result = seqno;
+	return 0;
 }
 
 static u32
-render_ring_get_seqno(struct drm_device *dev,
-		      struct intel_ring_buffer *ring)
+ring_get_seqno(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	if (HAS_PIPE_CONTROL(dev))
-		return ((volatile u32 *)(dev_priv->seqno_page))[0];
-	else
-		return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static u32
+pc_render_get_seqno(struct intel_ring_buffer *ring)
+{
+	struct pipe_control *pc = ring->private;
+	return pc->cpu_page[0];
 }
 
 static void
-render_ring_get_user_irq(struct drm_device *dev,
-			 struct intel_ring_buffer *ring)
+ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	unsigned long irqflags;
+	dev_priv->gt_irq_mask &= ~mask;
+	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+	POSTING_READ(GTIMR);
+}
+
+static void
+ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->gt_irq_mask |= mask;
+	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+	POSTING_READ(GTIMR);
+}
+
+static void
+i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->irq_mask &= ~mask;
+	I915_WRITE(IMR, dev_priv->irq_mask);
+	POSTING_READ(IMR);
+}
+
+static void
+i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->irq_mask |= mask;
+	I915_WRITE(IMR, dev_priv->irq_mask);
+	POSTING_READ(IMR);
+}
 
-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
+static bool
+render_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	spin_lock(&ring->irq_lock);
+	if (ring->irq_refcount++ == 0) {
 		if (HAS_PCH_SPLIT(dev))
-			ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
+			ironlake_enable_irq(dev_priv,
+					    GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
 		else
 			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
 	}
-	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+	spin_unlock(&ring->irq_lock);
+
+	return true;
 }
 
 static void
-render_ring_put_user_irq(struct drm_device *dev,
-			 struct intel_ring_buffer *ring)
+render_ring_put_irq(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	unsigned long irqflags;
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
-	if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
+	spin_lock(&ring->irq_lock);
+	if (--ring->irq_refcount == 0) {
 		if (HAS_PCH_SPLIT(dev))
-			ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
+			ironlake_disable_irq(dev_priv,
+					     GT_USER_INTERRUPT |
+					     GT_PIPE_NOTIFY);
 		else
 			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
 	}
-	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+	spin_unlock(&ring->irq_lock);
 }
 
-void intel_ring_setup_status_page(struct drm_device *dev,
-				  struct intel_ring_buffer *ring)
+void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	if (IS_GEN6(dev)) {
-		I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base),
-			   ring->status_page.gfx_addr);
-		I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */
-	} else {
-		I915_WRITE(RING_HWS_PGA(ring->mmio_base),
-			   ring->status_page.gfx_addr);
-		I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */
-	}
-
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	u32 mmio = IS_GEN6(ring->dev) ?
+		RING_HWS_PGA_GEN6(ring->mmio_base) :
+		RING_HWS_PGA(ring->mmio_base);
+	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+	POSTING_READ(mmio);
 }
 
-static void
-bsd_ring_flush(struct drm_device *dev,
-	       struct intel_ring_buffer *ring,
+static int
+bsd_ring_flush(struct intel_ring_buffer *ring,
 	       u32 invalidate_domains,
 	       u32 flush_domains)
 {
-	intel_ring_begin(dev, ring, 2);
-	intel_ring_emit(dev, ring, MI_FLUSH);
-	intel_ring_emit(dev, ring, MI_NOOP);
-	intel_ring_advance(dev, ring);
-}
+	int ret;
 
-static int init_bsd_ring(struct drm_device *dev,
-			 struct intel_ring_buffer *ring)
-{
-	return init_ring_common(dev, ring);
+	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+		return 0;
+
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_FLUSH);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+	return 0;
 }
 
-static u32
-ring_add_request(struct drm_device *dev,
-		 struct intel_ring_buffer *ring,
-		 u32 flush_domains)
+static int
+ring_add_request(struct intel_ring_buffer *ring,
+		 u32 *result)
 {
 	u32 seqno;
+	int ret;
 
-	seqno = i915_gem_get_seqno(dev);
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	seqno = i915_gem_get_seqno(ring->dev);
 
-	intel_ring_begin(dev, ring, 4);
-	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
-	intel_ring_emit(dev, ring,
-			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(dev, ring, seqno);
-	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
-	intel_ring_advance(dev, ring);
+	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	intel_ring_advance(ring);
 
 	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+	*result = seqno;
+	return 0;
+}
 
-	return seqno;
+static bool
+ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	spin_lock(&ring->irq_lock);
+	if (ring->irq_refcount++ == 0)
+		ironlake_enable_irq(dev_priv, flag);
+	spin_unlock(&ring->irq_lock);
+
+	return true;
 }
 
 static void
-bsd_ring_get_user_irq(struct drm_device *dev,
-		      struct intel_ring_buffer *ring)
+ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	spin_lock(&ring->irq_lock);
+	if (--ring->irq_refcount == 0)
+		ironlake_disable_irq(dev_priv, flag);
+	spin_unlock(&ring->irq_lock);
+}
+
+static bool
+gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 {
-	/* do nothing */
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	spin_lock(&ring->irq_lock);
+	if (ring->irq_refcount++ == 0) {
+		ring->irq_mask &= ~rflag;
+		I915_WRITE_IMR(ring, ring->irq_mask);
+		ironlake_enable_irq(dev_priv, gflag);
+	}
+	spin_unlock(&ring->irq_lock);
+
+	return true;
 }
+
 static void
-bsd_ring_put_user_irq(struct drm_device *dev,
-		      struct intel_ring_buffer *ring)
+gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 {
-	/* do nothing */
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	spin_lock(&ring->irq_lock);
+	if (--ring->irq_refcount == 0) {
+		ring->irq_mask |= rflag;
+		I915_WRITE_IMR(ring, ring->irq_mask);
+		ironlake_disable_irq(dev_priv, gflag);
+	}
+	spin_unlock(&ring->irq_lock);
 }
 
-static u32
-ring_status_page_get_seqno(struct drm_device *dev,
-			   struct intel_ring_buffer *ring)
+static bool
+bsd_ring_get_irq(struct intel_ring_buffer *ring)
 {
-	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+	return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
+}
+static void
+bsd_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
 }
 
 static int
-ring_dispatch_gem_execbuffer(struct drm_device *dev,
-			     struct intel_ring_buffer *ring,
-			     struct drm_i915_gem_execbuffer2 *exec,
-			     struct drm_clip_rect *cliprects,
-			     uint64_t exec_offset)
+ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
 {
-	uint32_t exec_start;
-	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
-	intel_ring_begin(dev, ring, 2);
-	intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
-			(2 << 6) | MI_BATCH_NON_SECURE_I965);
-	intel_ring_emit(dev, ring, exec_start);
-	intel_ring_advance(dev, ring);
+	int ret;
+
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring,
+			MI_BATCH_BUFFER_START | (2 << 6) |
+			MI_BATCH_NON_SECURE_I965);
+	intel_ring_emit(ring, offset);
+	intel_ring_advance(ring);
+
 	return 0;
 }
 
 static int
-render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
-				    struct intel_ring_buffer *ring,
-				    struct drm_i915_gem_execbuffer2 *exec,
-				    struct drm_clip_rect *cliprects,
-				    uint64_t exec_offset)
+render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+				u32 offset, u32 len)
 {
+	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int nbox = exec->num_cliprects;
-	int i = 0, count;
-	uint32_t exec_start, exec_len;
-	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
-	exec_len = (uint32_t) exec->batch_len;
+	int ret;
 
 	trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
 
-	count = nbox ? nbox : 1;
+	if (IS_I830(dev) || IS_845G(dev)) {
+		ret = intel_ring_begin(ring, 4);
+		if (ret)
+			return ret;
 
-	for (i = 0; i < count; i++) {
-		if (i < nbox) {
-			int ret = i915_emit_box(dev, cliprects, i,
-						exec->DR1, exec->DR4);
-			if (ret)
-				return ret;
-		}
+		intel_ring_emit(ring, MI_BATCH_BUFFER);
+		intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+		intel_ring_emit(ring, offset + len - 8);
+		intel_ring_emit(ring, 0);
+	} else {
+		ret = intel_ring_begin(ring, 2);
+		if (ret)
+			return ret;
 
-		if (IS_I830(dev) || IS_845G(dev)) {
-			intel_ring_begin(dev, ring, 4);
-			intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
-			intel_ring_emit(dev, ring,
-					exec_start | MI_BATCH_NON_SECURE);
-			intel_ring_emit(dev, ring, exec_start + exec_len - 4);
-			intel_ring_emit(dev, ring, 0);
+		if (INTEL_INFO(dev)->gen >= 4) {
+			intel_ring_emit(ring,
+					MI_BATCH_BUFFER_START | (2 << 6) |
+					MI_BATCH_NON_SECURE_I965);
+			intel_ring_emit(ring, offset);
 		} else {
-			intel_ring_begin(dev, ring, 2);
-			if (INTEL_INFO(dev)->gen >= 4) {
-				intel_ring_emit(dev, ring,
-						MI_BATCH_BUFFER_START | (2 << 6)
-						| MI_BATCH_NON_SECURE_I965);
-				intel_ring_emit(dev, ring, exec_start);
-			} else {
-				intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
-						| (2 << 6));
-				intel_ring_emit(dev, ring, exec_start |
-						MI_BATCH_NON_SECURE);
-			}
+			intel_ring_emit(ring,
+					MI_BATCH_BUFFER_START | (2 << 6));
+			intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
 		}
-		intel_ring_advance(dev, ring);
 	}
-
-	if (IS_G4X(dev) || IS_GEN5(dev)) {
-		intel_ring_begin(dev, ring, 2);
-		intel_ring_emit(dev, ring, MI_FLUSH |
-				MI_NO_WRITE_FLUSH |
-				MI_INVALIDATE_ISP );
-		intel_ring_emit(dev, ring, MI_NOOP);
-		intel_ring_advance(dev, ring);
-	}
-	/* XXX breadcrumb */
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
-static void cleanup_status_page(struct drm_device *dev,
-				struct intel_ring_buffer *ring)
+static void cleanup_status_page(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	struct drm_i915_gem_object *obj;
 
 	obj = ring->status_page.obj;
 	if (obj == NULL)
 		return;
-	obj_priv = to_intel_bo(obj);
 
-	kunmap(obj_priv->pages[0]);
+	kunmap(obj->pages[0]);
 	i915_gem_object_unpin(obj);
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 	ring->status_page.obj = NULL;
 
 	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
 }
 
-static int init_status_page(struct drm_device *dev,
-			    struct intel_ring_buffer *ring)
+static int init_status_page(struct intel_ring_buffer *ring)
 {
+	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	obj = i915_gem_alloc_object(dev, 4096);
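
The index arithmetic in update_semaphore() above is easier to follow with concrete values. Assuming the rings sit in dev_priv->ring[] in the order cs (0), vcs (1), bcs (2), as the RCS/VCS/BCS initializers at the end of this patch suggest:

	/* id = ring_index + 2 - i, modulo 3 */
	/* from cs  (0): i=1 -> (0 + 1) % 3 = 1 (vcs), i=0 -> (0 + 2) % 3 = 2 (bcs) */
	/* from vcs (1): i=1 -> (1 + 1) % 3 = 2 (bcs), i=0 -> (1 + 2) % 3 = 0 (cs)  */
	/* from bcs (2): i=1 -> (2 + 1) % 3 = 0 (cs),  i=0 -> (2 + 2) % 3 = 1 (vcs) */

which reproduces the table in the code comment: each ring updates the mailboxes of the other two rings, and those rings later wait on the value with MI_SEMAPHORE_COMPARE via intel_ring_sync().
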
@@ -540,16 +774,15 @@ static int init_status_page(struct drm_device *dev,
 		ret = -ENOMEM;
 		goto err;
 	}
-	obj_priv = to_intel_bo(obj);
-	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+	obj->agp_type = AGP_USER_CACHED_MEMORY;
 
-	ret = i915_gem_object_pin(obj, 4096);
+	ret = i915_gem_object_pin(obj, 4096, true);
 	if (ret != 0) {
 		goto err_unref;
 	}
 
-	ring->status_page.gfx_addr = obj_priv->gtt_offset;
-	ring->status_page.page_addr = kmap(obj_priv->pages[0]);
+	ring->status_page.gfx_addr = obj->gtt_offset;
+	ring->status_page.page_addr = kmap(obj->pages[0]);
 	if (ring->status_page.page_addr == NULL) {
 		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
 		goto err_unpin;
@@ -557,7 +790,7 @@ static int init_status_page(struct drm_device *dev,
 	ring->status_page.obj = obj;
 	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
-	intel_ring_setup_status_page(dev, ring);
+	intel_ring_setup_status_page(ring);
 	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
 			ring->name, ring->status_page.gfx_addr);
 
@@ -566,7 +799,7 @@ static int init_status_page(struct drm_device *dev,
 err_unpin:
 	i915_gem_object_unpin(obj);
 err_unref:
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 err:
 	return ret;
 }
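
Every ring now reports its seqno through the per-ring hardware status page initialised here. The reader side, intel_read_status_page(), lives in intel_ringbuffer.h rather than in this diff; its assumed shape is roughly:

	/* assumed from intel_ringbuffer.h, not part of this patch */
	static inline u32
	intel_read_status_page(struct intel_ring_buffer *ring, int reg)
	{
		/* dword 'reg' of the CPU-kmapped status page; the GPU fills
		 * I915_GEM_HWS_INDEX via the MI_STORE_DWORD_INDEX writes
		 * emitted by the add_request hooks */
		return ((volatile u32 *)ring->status_page.page_addr)[reg];
	}
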
@@ -574,9 +807,7 @@ err:
 int intel_init_ring_buffer(struct drm_device *dev,
 			   struct intel_ring_buffer *ring)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
-	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	ring->dev = dev;
@@ -584,8 +815,11 @@ int intel_init_ring_buffer(struct drm_device *dev,
 	INIT_LIST_HEAD(&ring->request_list);
 	INIT_LIST_HEAD(&ring->gpu_write_list);
 
+	spin_lock_init(&ring->irq_lock);
+	ring->irq_mask = ~0;
+
 	if (I915_NEED_GFX_HWS(dev)) {
-		ret = init_status_page(dev, ring);
+		ret = init_status_page(ring);
 		if (ret)
 			return ret;
 	}
@@ -597,15 +831,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
 		goto err_hws;
 	}
 
-	ring->gem_object = obj;
+	ring->obj = obj;
 
-	ret = i915_gem_object_pin(obj, PAGE_SIZE);
+	ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
 	if (ret)
 		goto err_unref;
 
-	obj_priv = to_intel_bo(obj);
 	ring->map.size = ring->size;
-	ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
+	ring->map.offset = dev->agp->base + obj->gtt_offset;
 	ring->map.type = 0;
 	ring->map.flags = 0;
 	ring->map.mtrr = 0;
@@ -618,60 +851,64 @@ int intel_init_ring_buffer(struct drm_device *dev,
 	}
 
 	ring->virtual_start = ring->map.handle;
-	ret = ring->init(dev, ring);
+	ret = ring->init(ring);
 	if (ret)
 		goto err_unmap;
 
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		i915_kernel_lost_context(dev);
-	else {
-		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
-		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-		ring->space = ring->head - (ring->tail + 8);
-		if (ring->space < 0)
-			ring->space += ring->size;
-	}
-	return ret;
+	/* Workaround an erratum on the i830 which causes a hang if
+	 * the TAIL pointer points to within the last 2 cachelines
+	 * of the buffer.
+	 */
+	ring->effective_size = ring->size;
+	if (IS_I830(ring->dev))
+		ring->effective_size -= 128;
+
+	return 0;
 
 err_unmap:
 	drm_core_ioremapfree(&ring->map, dev);
 err_unpin:
 	i915_gem_object_unpin(obj);
 err_unref:
-	drm_gem_object_unreference(obj);
-	ring->gem_object = NULL;
+	drm_gem_object_unreference(&obj->base);
+	ring->obj = NULL;
 err_hws:
-	cleanup_status_page(dev, ring);
+	cleanup_status_page(ring);
 	return ret;
 }
 
-void intel_cleanup_ring_buffer(struct drm_device *dev,
-			       struct intel_ring_buffer *ring)
+void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 {
-	if (ring->gem_object == NULL)
+	struct drm_i915_private *dev_priv;
+	int ret;
+
+	if (ring->obj == NULL)
 		return;
 
-	drm_core_ioremapfree(&ring->map, dev);
+	/* Disable the ring buffer. The ring must be idle at this point */
+	dev_priv = ring->dev->dev_private;
+	ret = intel_wait_ring_buffer(ring, ring->size - 8);
+	I915_WRITE_CTL(ring, 0);
 
-	i915_gem_object_unpin(ring->gem_object);
-	drm_gem_object_unreference(ring->gem_object);
-	ring->gem_object = NULL;
+	drm_core_ioremapfree(&ring->map, ring->dev);
+
+	i915_gem_object_unpin(ring->obj);
+	drm_gem_object_unreference(&ring->obj->base);
+	ring->obj = NULL;
 
 	if (ring->cleanup)
 		ring->cleanup(ring);
 
-	cleanup_status_page(dev, ring);
+	cleanup_status_page(ring);
 }
 
-static int intel_wrap_ring_buffer(struct drm_device *dev,
-				  struct intel_ring_buffer *ring)
+static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
 {
 	unsigned int *virt;
-	int rem;
-	rem = ring->size - ring->tail;
+	int rem = ring->size - ring->tail;
 
 	if (ring->space < rem) {
-		int ret = intel_wait_ring_buffer(dev, ring, rem);
+		int ret = intel_wait_ring_buffer(ring, rem);
 		if (ret)
 			return ret;
 	}
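
The effective_size set up above feeds the wrap check in intel_ring_begin() further down. A compact restatement of the invariant, as a hypothetical helper not present in the patch:

	static bool needs_wrap(const struct intel_ring_buffer *ring, int bytes)
	{
		/* on i830, effective_size = size - 128, so the last two
		 * 64-byte cachelines are never handed out and TAIL can
		 * never come to rest inside them */
		return ring->tail + bytes > ring->effective_size;
	}
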
@@ -689,11 +926,12 @@ static int intel_wrap_ring_buffer(struct drm_device *dev,
 	return 0;
 }
 
-int intel_wait_ring_buffer(struct drm_device *dev,
-			   struct intel_ring_buffer *ring, int n)
+int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 {
+	int reread = 0;
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long end;
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 head;
 
 	trace_i915_ring_wait_begin (dev);
@@ -703,15 +941,14 @@ int intel_wait_ring_buffer(struct drm_device *dev,
 	 * fallback to the slow and accurate path.
 	 */
 	head = intel_read_status_page(ring, 4);
-	if (head < ring->actual_head)
+	if (reread)
 		head = I915_READ_HEAD(ring);
-	ring->actual_head = head;
 	ring->head = head & HEAD_ADDR;
 	ring->space = ring->head - (ring->tail + 8);
 	if (ring->space < 0)
 		ring->space += ring->size;
 	if (ring->space >= n) {
-		trace_i915_ring_wait_end (dev);
+		trace_i915_ring_wait_end(dev);
 		return 0;
 	}
 
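
The head/space recomputation above is standard circular-buffer bookkeeping; concrete numbers make it plain. Assuming the 32-page (0x20000 byte) rings declared later in this file:

	/* worked example, ring->size = 0x20000 (32 pages):
	 *   head  = 0x00100, tail = 0x1f000
	 *   space = 0x100 - (0x1f000 + 8) = -0x1ef08, then +0x20000
	 *         = 0x10f8 bytes free
	 * the +8 guard keeps tail from ever advancing flush against head */
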
@@ -722,29 +959,40 @@ int intel_wait_ring_buffer(struct drm_device *dev,
 	}
 
 		msleep(1);
+		if (atomic_read(&dev_priv->mm.wedged))
+			return -EAGAIN;
+		reread = 1;
 	} while (!time_after(jiffies, end));
 	trace_i915_ring_wait_end (dev);
 	return -EBUSY;
 }
 
-void intel_ring_begin(struct drm_device *dev,
-		      struct intel_ring_buffer *ring,
-		      int num_dwords)
+int intel_ring_begin(struct intel_ring_buffer *ring,
+		     int num_dwords)
 {
 	int n = 4*num_dwords;
-	if (unlikely(ring->tail + n > ring->size))
-		intel_wrap_ring_buffer(dev, ring);
-	if (unlikely(ring->space < n))
-		intel_wait_ring_buffer(dev, ring, n);
+	int ret;
+
+	if (unlikely(ring->tail + n > ring->effective_size)) {
+		ret = intel_wrap_ring_buffer(ring);
+		if (unlikely(ret))
+			return ret;
+	}
+
+	if (unlikely(ring->space < n)) {
+		ret = intel_wait_ring_buffer(ring, n);
+		if (unlikely(ret))
+			return ret;
+	}
 
 	ring->space -= n;
+	return 0;
 }
 
-void intel_ring_advance(struct drm_device *dev,
-			struct intel_ring_buffer *ring)
+void intel_ring_advance(struct intel_ring_buffer *ring)
 {
 	ring->tail &= ring->size - 1;
-	ring->write_tail(dev, ring, ring->tail);
+	ring->write_tail(ring, ring->tail);
 }
 
 static const struct intel_ring_buffer render_ring = {
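
intel_ring_advance() can mask the tail rather than test and subtract because every ring in this file is 32 * PAGE_SIZE, a power of two. An illustrative check of the identity, not part of the patch:

	/* tail & (size - 1) == tail % size, valid only for power-of-two size:
	 *   size = 0x20000, tail = 0x20008 -> 0x20008 & 0x1ffff = 0x8 */
	static u32 wrap_tail(u32 tail, u32 size)
	{
		return tail & (size - 1);
	}
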
@@ -756,10 +1004,11 @@ static const struct intel_ring_buffer render_ring = {
 	.write_tail = ring_write_tail,
 	.flush = render_ring_flush,
 	.add_request = render_ring_add_request,
-	.get_seqno = render_ring_get_seqno,
-	.user_irq_get = render_ring_get_user_irq,
-	.user_irq_put = render_ring_put_user_irq,
-	.dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
+	.get_seqno = ring_get_seqno,
+	.irq_get = render_ring_get_irq,
+	.irq_put = render_ring_put_irq,
+	.dispatch_execbuffer = render_ring_dispatch_execbuffer,
+	.cleanup = render_ring_cleanup,
 };
 
 /* ring buffer for bit-stream decoder */
@@ -769,22 +1018,21 @@ static const struct intel_ring_buffer bsd_ring = {
 	.id = RING_BSD,
 	.mmio_base = BSD_RING_BASE,
 	.size = 32 * PAGE_SIZE,
-	.init = init_bsd_ring,
+	.init = init_ring_common,
 	.write_tail = ring_write_tail,
 	.flush = bsd_ring_flush,
 	.add_request = ring_add_request,
-	.get_seqno = ring_status_page_get_seqno,
-	.user_irq_get = bsd_ring_get_user_irq,
-	.user_irq_put = bsd_ring_put_user_irq,
-	.dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer,
+	.get_seqno = ring_get_seqno,
+	.irq_get = bsd_ring_get_irq,
+	.irq_put = bsd_ring_put_irq,
+	.dispatch_execbuffer = ring_dispatch_execbuffer,
 };
 
 
-static void gen6_bsd_ring_write_tail(struct drm_device *dev,
-				     struct intel_ring_buffer *ring,
+static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
 				     u32 value)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 
 	/* Every tail move must follow the sequence below */
 	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
@@ -803,69 +1051,109 @@ static void gen6_bsd_ring_write_tail(struct drm_device *dev,
 		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
 }
 
-static void gen6_ring_flush(struct drm_device *dev,
-			    struct intel_ring_buffer *ring,
-			    u32 invalidate_domains,
-			    u32 flush_domains)
+static int gen6_ring_flush(struct intel_ring_buffer *ring,
+			   u32 invalidate_domains,
+			   u32 flush_domains)
 {
-	intel_ring_begin(dev, ring, 4);
-	intel_ring_emit(dev, ring, MI_FLUSH_DW);
-	intel_ring_emit(dev, ring, 0);
-	intel_ring_emit(dev, ring, 0);
-	intel_ring_emit(dev, ring, 0);
-	intel_ring_advance(dev, ring);
+	int ret;
+
+	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+		return 0;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_FLUSH_DW);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+	return 0;
 }
 
 static int
-gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev,
-				  struct intel_ring_buffer *ring,
-				  struct drm_i915_gem_execbuffer2 *exec,
-				  struct drm_clip_rect *cliprects,
-				  uint64_t exec_offset)
+gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+			      u32 offset, u32 len)
 {
-	uint32_t exec_start;
+	int ret;
 
-	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
 
-	intel_ring_begin(dev, ring, 2);
-	intel_ring_emit(dev, ring,
-			MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
 	/* bit0-7 is the length on GEN6+ */
-	intel_ring_emit(dev, ring, exec_start);
-	intel_ring_advance(dev, ring);
+	intel_ring_emit(ring, offset);
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
+static bool
+gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	return gen6_ring_get_irq(ring,
+				 GT_USER_INTERRUPT,
+				 GEN6_RENDER_USER_INTERRUPT);
+}
+
+static void
+gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	return gen6_ring_put_irq(ring,
+				 GT_USER_INTERRUPT,
+				 GEN6_RENDER_USER_INTERRUPT);
+}
+
+static bool
+gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	return gen6_ring_get_irq(ring,
+				 GT_GEN6_BSD_USER_INTERRUPT,
+				 GEN6_BSD_USER_INTERRUPT);
+}
+
+static void
+gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	return gen6_ring_put_irq(ring,
+				 GT_GEN6_BSD_USER_INTERRUPT,
+				 GEN6_BSD_USER_INTERRUPT);
+}
+
 /* ring buffer for Video Codec for Gen6+ */
 static const struct intel_ring_buffer gen6_bsd_ring = {
 	.name = "gen6 bsd ring",
 	.id = RING_BSD,
 	.mmio_base = GEN6_BSD_RING_BASE,
 	.size = 32 * PAGE_SIZE,
-	.init = init_bsd_ring,
+	.init = init_ring_common,
 	.write_tail = gen6_bsd_ring_write_tail,
 	.flush = gen6_ring_flush,
-	.add_request = ring_add_request,
-	.get_seqno = ring_status_page_get_seqno,
-	.user_irq_get = bsd_ring_get_user_irq,
-	.user_irq_put = bsd_ring_put_user_irq,
-	.dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
+	.add_request = gen6_add_request,
+	.get_seqno = ring_get_seqno,
+	.irq_get = gen6_bsd_ring_get_irq,
+	.irq_put = gen6_bsd_ring_put_irq,
+	.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
 };
 
 /* Blitter support (SandyBridge+) */
 
-static void
-blt_ring_get_user_irq(struct drm_device *dev,
-		      struct intel_ring_buffer *ring)
+static bool
+blt_ring_get_irq(struct intel_ring_buffer *ring)
 {
-	/* do nothing */
+	return gen6_ring_get_irq(ring,
+				 GT_BLT_USER_INTERRUPT,
+				 GEN6_BLITTER_USER_INTERRUPT);
 }
+
 static void
-blt_ring_put_user_irq(struct drm_device *dev,
-		      struct intel_ring_buffer *ring)
+blt_ring_put_irq(struct intel_ring_buffer *ring)
 {
-	/* do nothing */
+	gen6_ring_put_irq(ring,
+			  GT_BLT_USER_INTERRUPT,
+			  GEN6_BLITTER_USER_INTERRUPT);
 }
 
 
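
gen6_ring_get_irq() layers two masks: the per-ring IMR written through I915_WRITE_IMR (rflag) and the global GT mask in GTIMR (gflag); both must be open before a user interrupt reaches the CPU. A hedged sketch of the intended caller pattern for the irq_get/irq_put hooks installed below (wait_for_seqno_example is hypothetical, not in the patch):

	static int wait_for_seqno_example(struct intel_ring_buffer *ring, u32 seqno)
	{
		if (!ring->irq_get(ring))	/* first waiter unmasks */
			return -EBUSY;		/* irqs disabled, caller must poll */

		/* ... sleep until ring->get_seqno(ring) >= seqno ... */

		ring->irq_put(ring);		/* last waiter re-masks */
		return 0;
	}
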
@@ -883,32 +1171,31 @@ to_blt_workaround(struct intel_ring_buffer *ring)
 	return ring->private;
 }
 
-static int blt_ring_init(struct drm_device *dev,
-			 struct intel_ring_buffer *ring)
+static int blt_ring_init(struct intel_ring_buffer *ring)
 {
-	if (NEED_BLT_WORKAROUND(dev)) {
+	if (NEED_BLT_WORKAROUND(ring->dev)) {
 		struct drm_i915_gem_object *obj;
-		u32 __iomem *ptr;
+		u32 *ptr;
 		int ret;
 
-		obj = to_intel_bo(i915_gem_alloc_object(dev, 4096));
+		obj = i915_gem_alloc_object(ring->dev, 4096);
 		if (obj == NULL)
 			return -ENOMEM;
 
-		ret = i915_gem_object_pin(&obj->base, 4096);
+		ret = i915_gem_object_pin(obj, 4096, true);
 		if (ret) {
 			drm_gem_object_unreference(&obj->base);
 			return ret;
 		}
 
 		ptr = kmap(obj->pages[0]);
-		iowrite32(MI_BATCH_BUFFER_END, ptr);
-		iowrite32(MI_NOOP, ptr+1);
+		*ptr++ = MI_BATCH_BUFFER_END;
+		*ptr++ = MI_NOOP;
 		kunmap(obj->pages[0]);
 
-		ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
+		ret = i915_gem_object_set_to_gtt_domain(obj, false);
 		if (ret) {
-			i915_gem_object_unpin(&obj->base);
+			i915_gem_object_unpin(obj);
 			drm_gem_object_unreference(&obj->base);
 			return ret;
 		}
@@ -916,51 +1203,44 @@ static int blt_ring_init(struct drm_device *dev,
 		ring->private = obj;
 	}
 
-	return init_ring_common(dev, ring);
+	return init_ring_common(ring);
 }
 
-static void blt_ring_begin(struct drm_device *dev,
-			   struct intel_ring_buffer *ring,
+static int blt_ring_begin(struct intel_ring_buffer *ring,
 			  int num_dwords)
 {
 	if (ring->private) {
-		intel_ring_begin(dev, ring, num_dwords+2);
-		intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
-		intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset);
+		int ret = intel_ring_begin(ring, num_dwords+2);
+		if (ret)
+			return ret;
+
+		intel_ring_emit(ring, MI_BATCH_BUFFER_START);
+		intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
+
+		return 0;
 	} else
-		intel_ring_begin(dev, ring, 4);
+		return intel_ring_begin(ring, 4);
 }
 
-static void blt_ring_flush(struct drm_device *dev,
-			   struct intel_ring_buffer *ring,
+static int blt_ring_flush(struct intel_ring_buffer *ring,
 			  u32 invalidate_domains,
 			  u32 flush_domains)
 {
-	blt_ring_begin(dev, ring, 4);
-	intel_ring_emit(dev, ring, MI_FLUSH_DW);
-	intel_ring_emit(dev, ring, 0);
-	intel_ring_emit(dev, ring, 0);
-	intel_ring_emit(dev, ring, 0);
-	intel_ring_advance(dev, ring);
-}
+	int ret;
 
-static u32
-blt_ring_add_request(struct drm_device *dev,
-		     struct intel_ring_buffer *ring,
-		     u32 flush_domains)
-{
-	u32 seqno = i915_gem_get_seqno(dev);
+	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+		return 0;
 
-	blt_ring_begin(dev, ring, 4);
-	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
-	intel_ring_emit(dev, ring,
-			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(dev, ring, seqno);
-	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
-	intel_ring_advance(dev, ring);
+	ret = blt_ring_begin(ring, 4);
+	if (ret)
+		return ret;
 
-	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
-	return seqno;
+	intel_ring_emit(ring, MI_FLUSH_DW);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+	return 0;
 }
 
 static void blt_ring_cleanup(struct intel_ring_buffer *ring)
@@ -981,47 +1261,56 @@ static const struct intel_ring_buffer gen6_blt_ring = {
 	.init = blt_ring_init,
 	.write_tail = ring_write_tail,
 	.flush = blt_ring_flush,
-	.add_request = blt_ring_add_request,
-	.get_seqno = ring_status_page_get_seqno,
-	.user_irq_get = blt_ring_get_user_irq,
-	.user_irq_put = blt_ring_put_user_irq,
-	.dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
+	.add_request = gen6_add_request,
+	.get_seqno = ring_get_seqno,
+	.irq_get = blt_ring_get_irq,
+	.irq_put = blt_ring_put_irq,
+	.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
 	.cleanup = blt_ring_cleanup,
 };
 
 int intel_init_render_ring_buffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	dev_priv->render_ring = render_ring;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+
+	*ring = render_ring;
+	if (INTEL_INFO(dev)->gen >= 6) {
+		ring->add_request = gen6_add_request;
+		ring->irq_get = gen6_render_ring_get_irq;
+		ring->irq_put = gen6_render_ring_put_irq;
+	} else if (IS_GEN5(dev)) {
+		ring->add_request = pc_render_add_request;
+		ring->get_seqno = pc_render_get_seqno;
+	}
 
 	if (!I915_NEED_GFX_HWS(dev)) {
-		dev_priv->render_ring.status_page.page_addr
-			= dev_priv->status_page_dmah->vaddr;
-		memset(dev_priv->render_ring.status_page.page_addr,
-				0, PAGE_SIZE);
+		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+		memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 	}
 
-	return intel_init_ring_buffer(dev, &dev_priv->render_ring);
+	return intel_init_ring_buffer(dev, ring);
 }
 
 int intel_init_bsd_ring_buffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
 
 	if (IS_GEN6(dev))
-		dev_priv->bsd_ring = gen6_bsd_ring;
+		*ring = gen6_bsd_ring;
 	else
-		dev_priv->bsd_ring = bsd_ring;
+		*ring = bsd_ring;
 
-	return intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+	return intel_init_ring_buffer(dev, ring);
 }
 
 int intel_init_blt_ring_buffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
 
-	dev_priv->blt_ring = gen6_blt_ring;
+	*ring = gen6_blt_ring;
 
-	return intel_init_ring_buffer(dev, &dev_priv->blt_ring);
+	return intel_init_ring_buffer(dev, ring);
 }
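
With dev_priv->render_ring, bsd_ring and blt_ring folded into the dev_priv->ring[] array (indexed here by RCS, VCS and BCS), ring-generic code can loop instead of special-casing each engine. A sketch under that assumption; it is safe even for rings that were never initialised, because intel_cleanup_ring_buffer() returns early when ring->obj is NULL:

	static void cleanup_all_rings(struct drm_device *dev)
	{
		drm_i915_private_t *dev_priv = dev->dev_private;
		int i;

		for (i = 0; i < 3; i++)		/* RCS, VCS, BCS */
			intel_cleanup_ring_buffer(&dev_priv->ring[i]);
	}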