diff options
author | Daniel Vetter <daniel.vetter@ffwll.ch> | 2012-06-01 09:21:23 -0400 |
---|---|---|
committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2012-06-02 14:51:03 -0400 |
commit | 30dfebf34b9930277d83b25ec740510007cc4c6d (patch) | |
tree | 5682a4b4ffebb22f8b43267308eb2f354862147c /drivers | |
parent | 493a708179469e3978e50f59902e9d47b6f3dabd (diff) |
drm/i915: extract object active state flushing code
Both busy_ioctl and the new wait_ioctl need to do the same dance (or at
least should). Some slight changes:
- busy_ioctl now unconditionally checks for olr. Before emitting a
required flush would have prevented the olr check and hence required a
second call to the busy ioctl to really emit the request.
- the timeout wait now also retires requests. Not really required for
abi-reasons, but makes a notch more sense imo.
I've tested this by pimping the i-g-t test some more and also checking
the polling behaviour of the wait_rendering_timeout ioctl versus what
busy_ioctl returns.
v2: Too many people complained about unplug, new color is
flush_active.
v3: Kill the comment about the unplug moniker.
v4: s/un-active/inactive/
Reviewed-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c | 61 |
1 files changed, 29 insertions, 32 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index a20ac438b8ef..af67803e635f 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -2030,6 +2030,31 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj) | |||
2030 | } | 2030 | } |
2031 | 2031 | ||
2032 | /** | 2032 | /** |
2033 | * Ensures that an object will eventually get non-busy by flushing any required | ||
2034 | * write domains, emitting any outstanding lazy request and retiring and | ||
2035 | * completed requests. | ||
2036 | */ | ||
2037 | static int | ||
2038 | i915_gem_object_flush_active(struct drm_i915_gem_object *obj) | ||
2039 | { | ||
2040 | int ret; | ||
2041 | |||
2042 | if (obj->active) { | ||
2043 | ret = i915_gem_object_flush_gpu_write_domain(obj); | ||
2044 | if (ret) | ||
2045 | return ret; | ||
2046 | |||
2047 | ret = i915_gem_check_olr(obj->ring, | ||
2048 | obj->last_rendering_seqno); | ||
2049 | if (ret) | ||
2050 | return ret; | ||
2051 | i915_gem_retire_requests_ring(obj->ring); | ||
2052 | } | ||
2053 | |||
2054 | return 0; | ||
2055 | } | ||
2056 | |||
2057 | /** | ||
2033 | * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT | 2058 | * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT |
2034 | * @DRM_IOCTL_ARGS: standard ioctl arguments | 2059 | * @DRM_IOCTL_ARGS: standard ioctl arguments |
2035 | * | 2060 | * |
@@ -2073,11 +2098,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | |||
2073 | return -ENOENT; | 2098 | return -ENOENT; |
2074 | } | 2099 | } |
2075 | 2100 | ||
2076 | /* Need to make sure the object is flushed first. This non-obvious | 2101 | /* Need to make sure the object gets inactive eventually. */ |
2077 | * flush is required to enforce that (active && !olr) == no wait | 2102 | ret = i915_gem_object_flush_active(obj); |
2078 | * necessary. | ||
2079 | */ | ||
2080 | ret = i915_gem_object_flush_gpu_write_domain(obj); | ||
2081 | if (ret) | 2103 | if (ret) |
2082 | goto out; | 2104 | goto out; |
2083 | 2105 | ||
@@ -2089,10 +2111,6 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | |||
2089 | if (seqno == 0) | 2111 | if (seqno == 0) |
2090 | goto out; | 2112 | goto out; |
2091 | 2113 | ||
2092 | ret = i915_gem_check_olr(ring, seqno); | ||
2093 | if (ret) | ||
2094 | goto out; | ||
2095 | |||
2096 | /* Do this after OLR check to make sure we make forward progress polling | 2114 | /* Do this after OLR check to make sure we make forward progress polling |
2097 | * on this IOCTL with a 0 timeout (like busy ioctl) | 2115 | * on this IOCTL with a 0 timeout (like busy ioctl) |
2098 | */ | 2116 | */ |
@@ -3330,30 +3348,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
3330 | * become non-busy without any further actions, therefore emit any | 3348 | * become non-busy without any further actions, therefore emit any |
3331 | * necessary flushes here. | 3349 | * necessary flushes here. |
3332 | */ | 3350 | */ |
3333 | args->busy = obj->active; | 3351 | ret = i915_gem_object_flush_active(obj); |
3334 | if (args->busy) { | ||
3335 | /* Unconditionally flush objects, even when the gpu still uses this | ||
3336 | * object. Userspace calling this function indicates that it wants to | ||
3337 | * use this buffer rather sooner than later, so issuing the required | ||
3338 | * flush earlier is beneficial. | ||
3339 | */ | ||
3340 | if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { | ||
3341 | ret = i915_gem_flush_ring(obj->ring, | ||
3342 | 0, obj->base.write_domain); | ||
3343 | } else { | ||
3344 | ret = i915_gem_check_olr(obj->ring, | ||
3345 | obj->last_rendering_seqno); | ||
3346 | } | ||
3347 | 3352 | ||
3348 | /* Update the active list for the hardware's current position. | 3353 | args->busy = obj->active; |
3349 | * Otherwise this only updates on a delayed timer or when irqs | ||
3350 | * are actually unmasked, and our working set ends up being | ||
3351 | * larger than required. | ||
3352 | */ | ||
3353 | i915_gem_retire_requests_ring(obj->ring); | ||
3354 | |||
3355 | args->busy = obj->active; | ||
3356 | } | ||
3357 | 3354 | ||
3358 | drm_gem_object_unreference(&obj->base); | 3355 | drm_gem_object_unreference(&obj->base); |
3359 | unlock: | 3356 | unlock: |