path: root/drivers/gpu/drm/i915/intel_breadcrumbs.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2019-05-16 10:22:42 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-05-16 10:22:42 -0400
commit	cc7ce90153e74f8266eefee9fba466faa1a2d5df (patch)
tree	7ebac4bc27c2d400aca256c0b557c561540543e2	/drivers/gpu/drm/i915/intel_breadcrumbs.c
parent	83f3ef3de625a5766de2382f9e077d4daafd5bac (diff)
parent	8da0e1525b7f0d69c6cb44094963906282b32673 (diff)
Merge tag 'drm-next-2019-05-16' of git://anongit.freedesktop.org/drm/drm
Pull drm fixes from Dave Airlie:
 "A bunch of fixes for the merge window closure, doesn't seem to be
  anything too major or serious in there. It does add TU117 turing
  modesetting to nouveau but it's just an enable for preexisting code.

  amdgpu:
   - gpu reset at load crash fix
   - ATPX hotplug fix for when dGPU is off
   - SR-IOV fixes

  radeon:
   - r5xx pll fixes

  i915:
   - GVT (MCHBAR, buffer alignment, misc warnings fixes)
   - Fixes for newly enabled semaphore code
   - Geminilake disable framebuffer compression
   - HSW edp fast modeset fix
   - IRQ vs RCU race fix

  nouveau:
   - Turing modesetting fixes
   - TU117 support

  msm:
   - SDM845 bringup fixes

  panfrost:
   - static checker fixes

  pl111:
   - spinlock init fix

  bridge:
   - refresh rate register fix for adv7511"

* tag 'drm-next-2019-05-16' of git://anongit.freedesktop.org/drm/drm: (36 commits)
  drm/msm: Upgrade gxpd checks to IS_ERR_OR_NULL
  drm/msm/dpu: Remove duplicate header
  drm/pl111: Initialize clock spinlock early
  drm/msm: correct attempted NULL pointer dereference in debugfs
  drm/msm: remove resv fields from msm_gem_object struct
  drm/nouveau: fix duplication of nv50_head_atom struct
  drm/nouveau/disp/dp: respect sink limits when selecting failsafe link configuration
  drm/nouveau/core: initial support for boards with TU117 chipset
  drm/nouveau/core: allow detected chipset to be overridden
  drm/nouveau/kms/gf119-gp10x: push HeadSetControlOutputResource() mthd when encoders change
  drm/nouveau/kms/nv50-: fix bug preventing non-vsync'd page flips
  drm/nouveau/kms/gv100-: fix spurious window immediate interlocks
  drm/bridge: adv7511: Fix low refresh rate selection
  drm/panfrost: Add missing _fini() calls in panfrost_device_fini()
  drm/panfrost: Only put sync_out if non-NULL
  drm/i915: Seal races between async GPU cancellation, retirement and signaling
  drm/i915: Fix fastset vs. pfit on/off on HSW EDP transcoder
  drm/i915/fbc: disable framebuffer compression on GeminiLake
  drm/amdgpu/psp: move psp version specific function pointers to early_init
  drm/radeon: prefer lower reference dividers
  ...
Diffstat (limited to 'drivers/gpu/drm/i915/intel_breadcrumbs.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_breadcrumbs.c	78
1 file changed, 57 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 3cbffd400b1b..832cb6b1e9bd 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -23,6 +23,7 @@
  */
 
 #include <linux/kthread.h>
+#include <trace/events/dma_fence.h>
 #include <uapi/linux/sched/types.h>
 
 #include "i915_drv.h"
@@ -80,9 +81,39 @@ static inline bool __request_completed(const struct i915_request *rq)
 	return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
 }
 
+static bool
+__dma_fence_signal(struct dma_fence *fence)
+{
+	return !test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags);
+}
+
+static void
+__dma_fence_signal__timestamp(struct dma_fence *fence, ktime_t timestamp)
+{
+	fence->timestamp = timestamp;
+	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
+	trace_dma_fence_signaled(fence);
+}
+
+static void
+__dma_fence_signal__notify(struct dma_fence *fence)
+{
+	struct dma_fence_cb *cur, *tmp;
+
+	lockdep_assert_held(fence->lock);
+	lockdep_assert_irqs_disabled();
+
+	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
+		INIT_LIST_HEAD(&cur->node);
+		cur->func(fence, cur);
+	}
+	INIT_LIST_HEAD(&fence->cb_list);
+}
+
 void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
 {
 	struct intel_breadcrumbs *b = &engine->breadcrumbs;
+	const ktime_t timestamp = ktime_get();
 	struct intel_context *ce, *cn;
 	struct list_head *pos, *next;
 	LIST_HEAD(signal);
@@ -104,6 +135,10 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
 
 			GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL,
 					     &rq->fence.flags));
+			clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+
+			if (!__dma_fence_signal(&rq->fence))
+				continue;
 
 			/*
 			 * Queue for execution after dropping the signaling
@@ -111,14 +146,6 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
 			 * more signalers to the same context or engine.
 			 */
 			i915_request_get(rq);
-
-			/*
-			 * We may race with direct invocation of
-			 * dma_fence_signal(), e.g. i915_request_retire(),
-			 * so we need to acquire our reference to the request
-			 * before we cancel the breadcrumb.
-			 */
-			clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
 			list_add_tail(&rq->signal_link, &signal);
 		}
 
@@ -141,7 +168,12 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
 		struct i915_request *rq =
 			list_entry(pos, typeof(*rq), signal_link);
 
-		dma_fence_signal(&rq->fence);
+		__dma_fence_signal__timestamp(&rq->fence, timestamp);
+
+		spin_lock(&rq->lock);
+		__dma_fence_signal__notify(&rq->fence);
+		spin_unlock(&rq->lock);
+
 		i915_request_put(rq);
 	}
 }
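
Taken together, the three new helpers are an open-coded dma_fence_signal() split into its test-and-set, timestamp/trace and callback-notification stages, which lets the irq handler mark fences signaled under b->irq_lock and defer the callback walk until that lock is dropped. A minimal sketch of how the stages compose for a single fence (hypothetical function, not part of the patch, assuming it lives alongside the helpers added above):

/* Hypothetical illustration: the equivalent of dma_fence_signal(fence),
 * rebuilt from the helpers added by this patch.
 */
static void example_signal_one(struct dma_fence *fence)
{
	unsigned long flags;

	if (!__dma_fence_signal(fence))
		return; /* already signaled elsewhere */

	/* Record when it was signaled and emit the tracepoint. */
	__dma_fence_signal__timestamp(fence, ktime_get());

	/* Callbacks must run under the fence lock with irqs disabled. */
	spin_lock_irqsave(fence->lock, flags);
	__dma_fence_signal__notify(fence);
	spin_unlock_irqrestore(fence->lock, flags);
}

In the hunks above, intel_engine_breadcrumbs_irq() performs the first stage under b->irq_lock and the remaining two per request under rq->lock after building the local signal list.
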
@@ -243,19 +275,17 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
 
 bool i915_request_enable_breadcrumb(struct i915_request *rq)
 {
-	struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
-
-	GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
+	lockdep_assert_held(&rq->lock);
+	lockdep_assert_irqs_disabled();
 
-	if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
-		return true;
-
-	spin_lock(&b->irq_lock);
-	if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags) &&
-	    !__request_completed(rq)) {
+	if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
+		struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
 		struct intel_context *ce = rq->hw_context;
 		struct list_head *pos;
 
+		spin_lock(&b->irq_lock);
+		GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
+
 		__intel_breadcrumbs_arm_irq(b);
 
 		/*
@@ -284,8 +314,8 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
 		list_move_tail(&ce->signal_link, &b->signalers);
 
 		set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+		spin_unlock(&b->irq_lock);
 	}
-	spin_unlock(&b->irq_lock);
 
 	return !__request_completed(rq);
 }
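
The new lockdep assertions make the calling convention explicit: i915_request_enable_breadcrumb() (and i915_request_cancel_breadcrumb(), below) must now be invoked with rq->lock held and interrupts disabled, so arming and cancellation serialize against the irq handler. A hedged sketch of what that contract looks like from a caller's side (hypothetical wrapper, not a function from this patch):

/* Hypothetical caller sketch, only to illustrate the locking contract
 * asserted above.
 */
static bool example_arm_breadcrumb(struct i915_request *rq)
{
	unsigned long flags;
	bool armed;

	spin_lock_irqsave(&rq->lock, flags);
	/* Returns false if the request already completed, in which case
	 * no breadcrumb interrupt will ever signal it.
	 */
	armed = i915_request_enable_breadcrumb(rq);
	spin_unlock_irqrestore(&rq->lock, flags);

	return armed;
}
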
@@ -294,9 +324,15 @@ void i915_request_cancel_breadcrumb(struct i915_request *rq)
 {
 	struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
 
-	if (!test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
-		return;
+	lockdep_assert_held(&rq->lock);
+	lockdep_assert_irqs_disabled();
 
+	/*
+	 * We must wait for b->irq_lock so that we know the interrupt handler
+	 * has released its reference to the intel_context and has completed
+	 * the DMA_FENCE_FLAG_SIGNALED_BIT/I915_FENCE_FLAG_SIGNAL dance (if
+	 * required).
+	 */
 	spin_lock(&b->irq_lock);
 	if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
 		struct intel_context *ce = rq->hw_context;