author		Thomas Daniel <thomas.daniel@intel.com>		2014-07-24 12:04:39 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>		2014-08-14 16:43:47 -0400
commit		e981e7b17f2b41970e7e2367d4225e0bb3310667 (patch)
tree		d4109ce705c97f5e635aa7f4d97fa559b5d41cda
parent		acdd884a2e1b873995c120d5eabd8cab77f48f20 (diff)
drm/i915/bdw: Handle context switch events
Handle all context status events in the context status buffer on every
context switch interrupt. We only remove work from the execlist queue
after a context status buffer reports that it has completed, and we only
attempt to schedule new contexts on interrupt when a previously submitted
context completes (unless no contexts are queued, which means the GPU is
free).

We cannot call intel_runtime_pm_get() in an interrupt (or with a spinlock
grabbed, FWIW), because it might sleep, which is not a nice thing to do.
Instead, do the runtime_pm get/put together with the create/destroy
request, and handle the forcewake get/put directly.

Signed-off-by: Thomas Daniel <thomas.daniel@intel.com>

v2: Unreferencing the context when we are freeing the request might free
the backing bo, which requires the struct_mutex to be grabbed, so defer
unreferencing and freeing to a bottom half.

v3:
- Ack the interrupt immediately, before trying to handle it (fix for
missing interrupts by Bob Beckett <robert.beckett@intel.com>).
- Update the Context Status Buffer Read Pointer, just in case (spotted
by Damien Lespiau).

v4: New namespace and multiple rebase changes.

v5: Squash with "drm/i915/bdw: Do not call intel_runtime_pm_get() in an
interrupt", as suggested by Daniel.

Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Damien Lespiau <damien.lespiau@intel.com>
[danvet: Checkpatch ...]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
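As an aside, a minimal standalone sketch of the wrap-around arithmetic the new handler performs on the six-entry Context Status Buffer; the driver code proper is in intel_execlists_handle_ctx_events() in the diff below. The names drain_csb, process_csb_entry, csb_read and csb_write are invented for this illustration and do not exist in i915.

/*
 * Sketch only: the hardware writes up to six status entries into a ring
 * and advances a write pointer; the driver keeps its own read pointer
 * and walks the pending entries modulo 6.
 */
#include <stdint.h>
#include <stdio.h>

#define CSB_ENTRIES 6

/* Stand-in for per-entry handling (execlists_check_remove_request() in the patch). */
static void process_csb_entry(unsigned int slot)
{
	printf("processing CSB slot %u\n", slot);
}

static void drain_csb(uint8_t *csb_read, uint8_t csb_write)
{
	uint8_t read = *csb_read;

	/* The hardware write pointer may have wrapped past our read pointer. */
	if (read > csb_write)
		csb_write += CSB_ENTRIES;

	while (read < csb_write) {
		read++;
		process_csb_entry(read % CSB_ENTRIES);
	}

	/* Store the new read position, kept within 0..5. */
	*csb_read = csb_write % CSB_ENTRIES;
}

int main(void)
{
	uint8_t read = 4;

	/* Hardware has wrapped around and now points at slot 1. */
	drain_csb(&read, 1);
	printf("next read pointer: %u\n", read);
	return 0;
}

Starting from a read pointer of 4 with the hardware write pointer wrapped to 1, the sketch visits slots 5, 0 and 1 and leaves the read pointer at 1, mirroring the write_pointer += 6 / % 6 handling in the patch.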
-rw-r--r--	drivers/gpu/drm/i915/i915_irq.c			 35
-rw-r--r--	drivers/gpu/drm/i915/intel_lrc.c		133
-rw-r--r--	drivers/gpu/drm/i915/intel_lrc.h		  3
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.h		  1
4 files changed, 155 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 00957fa0b877..f5d6795887d2 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1632,6 +1632,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
 				       struct drm_i915_private *dev_priv,
 				       u32 master_ctl)
 {
+	struct intel_engine_cs *ring;
 	u32 rcs, bcs, vcs;
 	uint32_t tmp = 0;
 	irqreturn_t ret = IRQ_NONE;
@@ -1641,14 +1642,20 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
 		if (tmp) {
 			I915_WRITE(GEN8_GT_IIR(0), tmp);
 			ret = IRQ_HANDLED;
+
 			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
-			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
+			ring = &dev_priv->ring[RCS];
 			if (rcs & GT_RENDER_USER_INTERRUPT)
-				notify_ring(dev, &dev_priv->ring[RCS]);
+				notify_ring(dev, ring);
+			if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
+				intel_execlists_handle_ctx_events(ring);
+
+			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
+			ring = &dev_priv->ring[BCS];
 			if (bcs & GT_RENDER_USER_INTERRUPT)
-				notify_ring(dev, &dev_priv->ring[BCS]);
-			if ((rcs | bcs) & GT_CONTEXT_SWITCH_INTERRUPT)
-				DRM_DEBUG_DRIVER("TODO: Context switch\n");
+				notify_ring(dev, ring);
+			if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
+				intel_execlists_handle_ctx_events(ring);
 		} else
 			DRM_ERROR("The master control interrupt lied (GT0)!\n");
 	}
@@ -1658,16 +1665,20 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
 		if (tmp) {
 			I915_WRITE(GEN8_GT_IIR(1), tmp);
 			ret = IRQ_HANDLED;
+
 			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
+			ring = &dev_priv->ring[VCS];
 			if (vcs & GT_RENDER_USER_INTERRUPT)
-				notify_ring(dev, &dev_priv->ring[VCS]);
+				notify_ring(dev, ring);
 			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
-				DRM_DEBUG_DRIVER("TODO: Context switch\n");
+				intel_execlists_handle_ctx_events(ring);
+
 			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
+			ring = &dev_priv->ring[VCS2];
 			if (vcs & GT_RENDER_USER_INTERRUPT)
-				notify_ring(dev, &dev_priv->ring[VCS2]);
+				notify_ring(dev, ring);
 			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
-				DRM_DEBUG_DRIVER("TODO: Context switch\n");
+				intel_execlists_handle_ctx_events(ring);
 		} else
 			DRM_ERROR("The master control interrupt lied (GT1)!\n");
 	}
@@ -1688,11 +1699,13 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
 		if (tmp) {
 			I915_WRITE(GEN8_GT_IIR(3), tmp);
 			ret = IRQ_HANDLED;
+
 			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
+			ring = &dev_priv->ring[VECS];
 			if (vcs & GT_RENDER_USER_INTERRUPT)
-				notify_ring(dev, &dev_priv->ring[VECS]);
+				notify_ring(dev, ring);
 			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
-				DRM_DEBUG_DRIVER("TODO: Context switch\n");
+				intel_execlists_handle_ctx_events(ring);
 		} else
 			DRM_ERROR("The master control interrupt lied (GT3)!\n");
 	}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index e81f5f6c49b9..22f6a7c0cb18 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -49,6 +49,22 @@
 #define RING_ELSP(ring)			((ring)->mmio_base+0x230)
 #define RING_EXECLIST_STATUS(ring)	((ring)->mmio_base+0x234)
 #define RING_CONTEXT_CONTROL(ring)	((ring)->mmio_base+0x244)
+#define RING_CONTEXT_STATUS_BUF(ring)	((ring)->mmio_base+0x370)
+#define RING_CONTEXT_STATUS_PTR(ring)	((ring)->mmio_base+0x3a0)
+
+#define RING_EXECLIST_QFULL		(1 << 0x2)
+#define RING_EXECLIST1_VALID		(1 << 0x3)
+#define RING_EXECLIST0_VALID		(1 << 0x4)
+#define RING_EXECLIST_ACTIVE_STATUS	(3 << 0xE)
+#define RING_EXECLIST1_ACTIVE		(1 << 0x11)
+#define RING_EXECLIST0_ACTIVE		(1 << 0x12)
+
+#define GEN8_CTX_STATUS_IDLE_ACTIVE	(1 << 0)
+#define GEN8_CTX_STATUS_PREEMPTED	(1 << 1)
+#define GEN8_CTX_STATUS_ELEMENT_SWITCH	(1 << 2)
+#define GEN8_CTX_STATUS_ACTIVE_IDLE	(1 << 3)
+#define GEN8_CTX_STATUS_COMPLETE	(1 << 4)
+#define GEN8_CTX_STATUS_LITE_RESTORE	(1 << 15)
 
 #define CTX_LRI_HEADER_0		0x01
 #define CTX_CONTEXT_CONTROL		0x02
@@ -150,6 +166,7 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	uint64_t temp = 0;
 	uint32_t desc[4];
+	unsigned long flags;
 
 	/* XXX: You must always write both descriptors in the order below. */
 	if (ctx_obj1)
@@ -163,9 +180,17 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
 	desc[3] = (u32)(temp >> 32);
 	desc[2] = (u32)temp;
 
-	/* Set Force Wakeup bit to prevent GT from entering C6 while
-	 * ELSP writes are in progress */
-	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+	/* Set Force Wakeup bit to prevent GT from entering C6 while ELSP writes
+	 * are in progress.
+	 *
+	 * The other problem is that we can't just call gen6_gt_force_wake_get()
+	 * because that function calls intel_runtime_pm_get(), which might sleep.
+	 * Instead, we do the runtime_pm_get/put when creating/destroying requests.
+	 */
+	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
+	if (dev_priv->uncore.forcewake_count++ == 0)
+		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
 
 	I915_WRITE(RING_ELSP(ring), desc[1]);
 	I915_WRITE(RING_ELSP(ring), desc[0]);
@@ -176,7 +201,11 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
 	/* ELSP is a wo register, so use another nearby reg for posting instead */
 	POSTING_READ(RING_EXECLIST_STATUS(ring));
 
-	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+	/* Release Force Wakeup (see the big comment above). */
+	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
+	if (--dev_priv->uncore.forcewake_count == 0)
+		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
 }
 
 static int execlists_ctx_write_tail(struct drm_i915_gem_object *ctx_obj, u32 tail)
@@ -224,6 +253,9 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
 {
 	struct intel_ctx_submit_request *req0 = NULL, *req1 = NULL;
 	struct intel_ctx_submit_request *cursor = NULL, *tmp = NULL;
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+	assert_spin_locked(&ring->execlist_lock);
 
 	if (list_empty(&ring->execlist_queue))
 		return;
@@ -237,8 +269,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
 			/* Same ctx: ignore first request, as second request
 			 * will update tail past first request's workload */
 			list_del(&req0->execlist_link);
-			i915_gem_context_unreference(req0->ctx);
-			kfree(req0);
+			queue_work(dev_priv->wq, &req0->work);
 			req0 = cursor;
 		} else {
 			req1 = cursor;
@@ -251,11 +282,97 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
 						 req1 ? req1->tail : 0));
 }
 
+static bool execlists_check_remove_request(struct intel_engine_cs *ring,
+					   u32 request_id)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct intel_ctx_submit_request *head_req;
+
+	assert_spin_locked(&ring->execlist_lock);
+
+	head_req = list_first_entry_or_null(&ring->execlist_queue,
+					    struct intel_ctx_submit_request,
+					    execlist_link);
+
+	if (head_req != NULL) {
+		struct drm_i915_gem_object *ctx_obj =
+				head_req->ctx->engine[ring->id].state;
+		if (intel_execlists_ctx_id(ctx_obj) == request_id) {
+			list_del(&head_req->execlist_link);
+			queue_work(dev_priv->wq, &head_req->work);
+			return true;
+		}
+	}
+
+	return false;
+}
+
+void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	u32 status_pointer;
+	u8 read_pointer;
+	u8 write_pointer;
+	u32 status;
+	u32 status_id;
+	u32 submit_contexts = 0;
+
+	status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
+
+	read_pointer = ring->next_context_status_buffer;
+	write_pointer = status_pointer & 0x07;
+	if (read_pointer > write_pointer)
+		write_pointer += 6;
+
+	spin_lock(&ring->execlist_lock);
+
+	while (read_pointer < write_pointer) {
+		read_pointer++;
+		status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
+				(read_pointer % 6) * 8);
+		status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
+				(read_pointer % 6) * 8 + 4);
+
+		if (status & GEN8_CTX_STATUS_COMPLETE) {
+			if (execlists_check_remove_request(ring, status_id))
+				submit_contexts++;
+		}
+	}
+
+	if (submit_contexts != 0)
+		execlists_context_unqueue(ring);
+
+	spin_unlock(&ring->execlist_lock);
+
+	WARN(submit_contexts > 2, "More than two context complete events?\n");
+	ring->next_context_status_buffer = write_pointer % 6;
+
+	I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
+		   ((u32)ring->next_context_status_buffer & 0x07) << 8);
+}
+
+static void execlists_free_request_task(struct work_struct *work)
+{
+	struct intel_ctx_submit_request *req =
+		container_of(work, struct intel_ctx_submit_request, work);
+	struct drm_device *dev = req->ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	intel_runtime_pm_put(dev_priv);
+
+	mutex_lock(&dev->struct_mutex);
+	i915_gem_context_unreference(req->ctx);
+	mutex_unlock(&dev->struct_mutex);
+
+	kfree(req);
+}
+
 static int execlists_context_queue(struct intel_engine_cs *ring,
 				   struct intel_context *to,
 				   u32 tail)
 {
 	struct intel_ctx_submit_request *req = NULL;
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	unsigned long flags;
 	bool was_empty;
 
@@ -266,6 +383,9 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
 	i915_gem_context_reference(req->ctx);
 	req->ring = ring;
 	req->tail = tail;
+	INIT_WORK(&req->work, execlists_free_request_task);
+
+	intel_runtime_pm_get(dev_priv);
 
 	spin_lock_irqsave(&ring->execlist_lock, flags);
 
@@ -907,6 +1027,7 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
 
 	INIT_LIST_HEAD(&ring->execlist_queue);
 	spin_lock_init(&ring->execlist_lock);
+	ring->next_context_status_buffer = 0;
 
 	ret = intel_lr_context_deferred_create(dctx, ring);
 	if (ret)
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 3c389b3a2b75..a3f135cf439e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -65,6 +65,9 @@ struct intel_ctx_submit_request {
 	u32 tail;
 
 	struct list_head execlist_link;
+	struct work_struct work;
 };
 
+void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring);
+
 #endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 670262dabb6c..9cbf7b0ebc99 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -233,6 +233,7 @@ struct intel_engine_cs {
 	/* Execlists */
 	spinlock_t execlist_lock;
 	struct list_head execlist_queue;
+	u8 next_context_status_buffer;
 	u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
 	int		(*emit_request)(struct intel_ringbuffer *ringbuf);
 	int		(*emit_flush)(struct intel_ringbuffer *ringbuf,