author     John Harrison <John.C.Harrison@Intel.com>    2015-06-30 07:40:55 -0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>        2015-07-03 01:38:59 -0400
commit     79bbcc299fca92ba3558c4966e6ad52ee1052d89
tree       0e0a57996f0e1e0ba94ef8ae2efe682026ff7ca1
parent     793dfa59bcfde9d642295480674926827e9adcfc
drm/i915: Reserve space improvements
An earlier patch was added to reserve space in the ring buffer for the commands issued during 'add_request()'. The initial version was pessimistic in the way it handled buffer wrapping and would cause premature wraps and thus waste ring space.

This patch updates the code to better handle the wrap case. It no longer enforces that the space being asked for and the reserved space are a single contiguous block. Instead, it allows the reserve to be on the far end of a wrap operation. It still guarantees that the space is available, so when the wrap occurs no wait will happen. Thus the wrap cannot fail, which is the whole point of the exercise.

Also fixed a merge failure with some comments from the original patch.

v2: Incorporated suggestion by David Gordon to move the wrap code inside the prepare function and thus allow a single combined wait_for_space() call rather than doing one before the wrap and another after. This also makes the prepare code much simpler and easier to follow.

v3: Fix for 'effective_size' vs 'size' during ring buffer remainder calculations (spotted by Tomas Elf).

For: VIZ-5115
CC: Daniel Vetter <daniel@ffwll.ch>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Tomas Elf <tomas.elf@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
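Before reading the diff, the core decision is easiest to see in isolation. The following is a minimal, self-contained sketch in userspace C, with hypothetical names (struct ring_sketch, prepare_bytes()) that are illustrations only and not driver code. It mirrors the three cases of the reworked __intel_ring_prepare() below: wrap and wait for base plus reserve, wrap and wait only for the reserve, or simply wait without wrapping.

/*
 * Minimal userspace sketch of the new wait/wrap decision. The struct and
 * function names are hypothetical, not the driver's actual types.
 */
#include <stdbool.h>
#include <stdio.h>

struct ring_sketch {
	int size;		/* total ring size */
	int effective_size;	/* usable size (may be smaller on some gens) */
	int tail;		/* current write offset */
	int space;		/* free space currently known */
	int reserved_size;	/* space held back for add_request() */
	bool reserved_in_use;
};

/* Returns how many bytes must be free before emitting, and whether the
 * remainder of the ring must first be no-op filled (a wrap). */
static int prepare_bytes(const struct ring_sketch *r, int bytes, bool *need_wrap)
{
	int remain_usable = r->effective_size - r->tail;
	int remain_actual = r->size - r->tail;
	int total_bytes = r->reserved_in_use ? bytes : bytes + r->reserved_size;
	int wait_bytes = 0;

	*need_wrap = false;

	if (bytes > remain_usable) {
		/* The request itself does not fit: flush out the remainder
		 * and wait for base + reserved beyond the wrap. */
		wait_bytes = remain_actual + total_bytes;
		*need_wrap = true;
	} else if (total_bytes > remain_usable) {
		/* The request fits but the reserve falls off the end: only
		 * the reserved size must be free after the wrap. */
		wait_bytes = remain_actual + r->reserved_size;
		*need_wrap = true;
	} else if (total_bytes > r->space) {
		/* No wrap needed, just wait for enough space. */
		wait_bytes = total_bytes;
	}

	return wait_bytes;
}

int main(void)
{
	/* Example: 4096-byte ring, tail near the end, 200-byte request with
	 * 160 bytes reserved for add_request(). */
	struct ring_sketch r = {
		.size = 4096, .effective_size = 4096, .tail = 4000,
		.space = 512, .reserved_size = 160, .reserved_in_use = false,
	};
	bool wrap;
	int wait = prepare_bytes(&r, 200, &wrap);

	/* remain_usable = 96 < 200, so wait = 96 + 360 = 456 and wrap = 1 */
	printf("wait %d bytes, wrap=%d\n", wait, wrap);
	return 0;
}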
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 90
1 file changed, 51 insertions, 39 deletions
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index af7c12ed0ba7..e39c8912f673 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2121,12 +2121,12 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 	unsigned space;
 	int ret;
 
-	/* The whole point of reserving space is to not wait! */
-	WARN_ON(ringbuf->reserved_in_use);
-
 	if (intel_ring_space(ringbuf) >= n)
 		return 0;
 
+	/* The whole point of reserving space is to not wait! */
+	WARN_ON(ringbuf->reserved_in_use);
+
 	list_for_each_entry(request, &ring->request_list, list) {
 		space = __intel_ring_space(request->postfix, ringbuf->tail,
 					   ringbuf->size);
@@ -2145,21 +2145,11 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 	return 0;
 }
 
-static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
+static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
 {
 	uint32_t __iomem *virt;
-	struct intel_ringbuffer *ringbuf = ring->buffer;
 	int rem = ringbuf->size - ringbuf->tail;
 
-	/* Can't wrap if space has already been reserved! */
-	WARN_ON(ringbuf->reserved_in_use);
-
-	if (ringbuf->space < rem) {
-		int ret = ring_wait_for_space(ring, rem);
-		if (ret)
-			return ret;
-	}
-
 	virt = ringbuf->virtual_start + ringbuf->tail;
 	rem /= 4;
 	while (rem--)
@@ -2167,8 +2157,6 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
 
 	ringbuf->tail = 0;
 	intel_ring_update_space(ringbuf);
-
-	return 0;
 }
 
 int intel_ring_idle(struct intel_engine_cs *ring)
@@ -2238,9 +2226,21 @@ void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
 void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
 {
 	WARN_ON(!ringbuf->reserved_in_use);
-	WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size,
-	     "request reserved size too small: %d vs %d!\n",
-	     ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size);
+	if (ringbuf->tail > ringbuf->reserved_tail) {
+		WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size,
+		     "request reserved size too small: %d vs %d!\n",
+		     ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size);
+	} else {
+		/*
+		 * The ring was wrapped while the reserved space was in use.
+		 * That means that some unknown amount of the ring tail was
+		 * no-op filled and skipped. Thus simply adding the ring size
+		 * to the tail and doing the above space check will not work.
+		 * Rather than attempt to track how much tail was skipped,
+		 * it is much simpler to say that also skipping the sanity
+		 * check every once in a while is not a big issue.
+		 */
+	}
 
 	ringbuf->reserved_size = 0;
 	ringbuf->reserved_in_use = false;
@@ -2249,33 +2249,45 @@ void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
 static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
 {
 	struct intel_ringbuffer *ringbuf = ring->buffer;
-	int ret;
-
-	/*
-	 * Add on the reserved size to the request to make sure that after
-	 * the intended commands have been emitted, there is guaranteed to
-	 * still be enough free space to send them to the hardware.
-	 */
-	if (!ringbuf->reserved_in_use)
-		bytes += ringbuf->reserved_size;
-
-	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
-		ret = intel_wrap_ring_buffer(ring);
-		if (unlikely(ret))
-			return ret;
-
-		if(ringbuf->reserved_size) {
-			uint32_t size = ringbuf->reserved_size;
-
-			intel_ring_reserved_space_cancel(ringbuf);
-			intel_ring_reserved_space_reserve(ringbuf, size);
-		}
-	}
-
-	if (unlikely(ringbuf->space < bytes)) {
-		ret = ring_wait_for_space(ring, bytes);
-		if (unlikely(ret))
-			return ret;
-	}
+	int remain_usable = ringbuf->effective_size - ringbuf->tail;
+	int remain_actual = ringbuf->size - ringbuf->tail;
+	int ret, total_bytes, wait_bytes = 0;
+	bool need_wrap = false;
+
+	if (ringbuf->reserved_in_use)
+		total_bytes = bytes;
+	else
+		total_bytes = bytes + ringbuf->reserved_size;
+
+	if (unlikely(bytes > remain_usable)) {
+		/*
+		 * Not enough space for the basic request. So need to flush
+		 * out the remainder and then wait for base + reserved.
+		 */
+		wait_bytes = remain_actual + total_bytes;
+		need_wrap = true;
+	} else {
+		if (unlikely(total_bytes > remain_usable)) {
+			/*
+			 * The base request will fit but the reserved space
+			 * falls off the end. So only need to wait for the
+			 * reserved size after flushing out the remainder.
+			 */
+			wait_bytes = remain_actual + ringbuf->reserved_size;
+			need_wrap = true;
+		} else if (total_bytes > ringbuf->space) {
+			/* No wrapping required, just waiting. */
+			wait_bytes = total_bytes;
+		}
+	}
+
+	if (wait_bytes) {
+		ret = ring_wait_for_space(ring, wait_bytes);
+		if (unlikely(ret))
+			return ret;
+
+		if (need_wrap)
+			__wrap_ring_buffer(ringbuf);
+	}
 
 	return 0;