aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/i915/intel_ringbuffer.c
diff options
context:
space:
mode:
authorJohn Harrison <John.C.Harrison@Intel.com>2015-06-18 08:10:09 -0400
committerDaniel Vetter <daniel.vetter@ffwll.ch>2015-06-23 08:01:56 -0400
commit29b1b415fcd95a2266ab58fc7825bccbffa5c142 (patch)
tree204cd21ccfc24fa228f154875eb454bf7c331a74 /drivers/gpu/drm/i915/intel_ringbuffer.c
parent0b076ecdf343b029c4c2c3a94ffd0199d97aa46c (diff)
drm/i915: Reserve ring buffer space for i915_add_request() commands
It is a bad idea for i915_add_request() to fail. The work will already have been sent to the ring and will be processed, but there will not be any tracking or management of that work. The only way the add request call can fail is if it can't write its epilogue commands to the ring (cache flushing, seqno updates, interrupt signalling). The reasons for that are mostly down to running out of ring buffer space and the problems associated with trying to get some more. This patch prevents that situation from happening in the first place. When a request is created, it marks sufficient space as reserved for the epilogue commands. Thus guaranteeing that by the time the epilogue is written, there will be plenty of space for it. Note that a ring_begin() call is required to actually reserve the space (and do any potential waiting). However, that is not currently done at request creation time. This is because the ring_begin() code can allocate a request. Hence calling begin() from the request allocation code would lead to infinite recursion! Later patches in this series remove the need for begin() to do the allocate. At that point, it becomes safe for the allocate to call begin() and really reserve the space. Until then, there is a potential for insufficient space to be available at the point of calling i915_add_request(). However, that would only be in the case where the request was created and immediately submitted without ever calling ring_begin() and adding any work to that request. Which should never happen. And even if it does, and if that request happens to fall down the tiny window of opportunity for failing due to being out of ring space then does it really matter because the request wasn't doing anything in the first place? v2: Updated the 'reserved space too small' warning to include the offending sizes. Added a 'cancel' operation to clean up when a request is abandoned. Added re-initialisation of tracking state after a buffer wrap to keep the sanity checks accurate. 
v3: Incremented the reserved size to accommodate Ironlake (after finally managing to run on an ILK system). Also fixed missing wrap code in LRC mode. v4: Added extra comment and removed duplicate WARN (feedback from Tomas). For: VIZ-5115 CC: Tomas Elf <tomas.elf@intel.com> Signed-off-by: John Harrison <John.C.Harrison@Intel.com> Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c71
1 files changed, 69 insertions, 2 deletions
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b70d25bffb60..0c2bf0ed633d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2113,6 +2113,9 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
2113 unsigned space; 2113 unsigned space;
2114 int ret; 2114 int ret;
2115 2115
2116 /* The whole point of reserving space is to not wait! */
2117 WARN_ON(ringbuf->reserved_in_use);
2118
2116 if (intel_ring_space(ringbuf) >= n) 2119 if (intel_ring_space(ringbuf) >= n)
2117 return 0; 2120 return 0;
2118 2121
@@ -2140,6 +2143,9 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
2140 struct intel_ringbuffer *ringbuf = ring->buffer; 2143 struct intel_ringbuffer *ringbuf = ring->buffer;
2141 int rem = ringbuf->size - ringbuf->tail; 2144 int rem = ringbuf->size - ringbuf->tail;
2142 2145
2146 /* Can't wrap if space has already been reserved! */
2147 WARN_ON(ringbuf->reserved_in_use);
2148
2143 if (ringbuf->space < rem) { 2149 if (ringbuf->space < rem) {
2144 int ret = ring_wait_for_space(ring, rem); 2150 int ret = ring_wait_for_space(ring, rem);
2145 if (ret) 2151 if (ret)
@@ -2190,16 +2196,77 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
2190 return 0; 2196 return 0;
2191} 2197}
2192 2198
2193static int __intel_ring_prepare(struct intel_engine_cs *ring, 2199void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
2194 int bytes) 2200{
2201 /* NB: Until request management is fully tidied up and the OLR is
2202 * removed, there are too many ways for get false hits on this
2203 * anti-recursion check! */
2204 /*WARN_ON(ringbuf->reserved_size);*/
2205 WARN_ON(ringbuf->reserved_in_use);
2206
2207 ringbuf->reserved_size = size;
2208
2209 /*
2210 * Really need to call _begin() here but that currently leads to
2211 * recursion problems! This will be fixed later but for now just
2212 * return and hope for the best. Note that there is only a real
2213 * problem if the create of the request never actually calls _begin()
2214 * but if they are not submitting any work then why did they create
2215 * the request in the first place?
2216 */
2217}
2218
2219void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
2220{
2221 WARN_ON(ringbuf->reserved_in_use);
2222
2223 ringbuf->reserved_size = 0;
2224 ringbuf->reserved_in_use = false;
2225}
2226
2227void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
2228{
2229 WARN_ON(ringbuf->reserved_in_use);
2230
2231 ringbuf->reserved_in_use = true;
2232 ringbuf->reserved_tail = ringbuf->tail;
2233}
2234
2235void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
2236{
2237 WARN_ON(!ringbuf->reserved_in_use);
2238 WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size,
2239 "request reserved size too small: %d vs %d!\n",
2240 ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size);
2241
2242 ringbuf->reserved_size = 0;
2243 ringbuf->reserved_in_use = false;
2244}
2245
2246static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
2195{ 2247{
2196 struct intel_ringbuffer *ringbuf = ring->buffer; 2248 struct intel_ringbuffer *ringbuf = ring->buffer;
2197 int ret; 2249 int ret;
2198 2250
2251 /*
2252 * Add on the reserved size to the request to make sure that after
2253 * the intended commands have been emitted, there is guaranteed to
2254 * still be enough free space to send them to the hardware.
2255 */
2256 if (!ringbuf->reserved_in_use)
2257 bytes += ringbuf->reserved_size;
2258
2199 if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) { 2259 if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
2200 ret = intel_wrap_ring_buffer(ring); 2260 ret = intel_wrap_ring_buffer(ring);
2201 if (unlikely(ret)) 2261 if (unlikely(ret))
2202 return ret; 2262 return ret;
2263
2264 if(ringbuf->reserved_size) {
2265 uint32_t size = ringbuf->reserved_size;
2266
2267 intel_ring_reserved_space_cancel(ringbuf);
2268 intel_ring_reserved_space_reserve(ringbuf, size);
2269 }
2203 } 2270 }
2204 2271
2205 if (unlikely(ringbuf->space < bytes)) { 2272 if (unlikely(ringbuf->space < bytes)) {