author     John Harrison <John.C.Harrison@Intel.com>  2014-11-24 13:49:35 -0500
committer  Daniel Vetter <daniel.vetter@ffwll.ch>      2014-12-03 03:35:19 -0500
commit     9c654818295eee21720e62040e235e6951b05b40
tree       0578ceeb092e4babb6d0c3407e786026413a96c1 /drivers/gpu
parent     cc8c4cc2a0cee06ecdd27aa654e26ec3b2b05048
drm/i915: Convert __wait_seqno() to __wait_request()
Now that all code above is using request structures instead of seqno values, it is possible to convert __wait_seqno() itself. Internally, it still calls i915_seqno_passed(); this will be updated later in the series. This step just changes the parameter list and function name.

For: VIZ-4377
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Thomas Daniel <Thomas.Daniel@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
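As a rough sketch of what the conversion looks like at a typical call site (drawn from the hunks below, not an addition to the patch itself):

    /* Before: callers dug the ring and seqno out of the request and
     * passed them separately.
     */
    ret = __i915_wait_seqno(i915_gem_request_get_ring(req),
                            i915_gem_request_get_seqno(req),
                            reset_counter, true, NULL, NULL);

    /* After: the request is passed directly; __i915_wait_request()
     * looks up the ring and seqno internally (still via
     * i915_seqno_passed() at this point in the series).
     */
    ret = __i915_wait_request(req, reset_counter, true, NULL, NULL);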
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h       |  2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c       | 45
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c  |  7
3 files changed, 24 insertions(+), 30 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9a9372a42c97..69a0e00039c6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2625,7 +2625,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
                        u32 *seqno);
 #define i915_add_request(ring, seqno) \
         __i915_add_request(ring, NULL, NULL, seqno)
-int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
+int __i915_wait_request(struct drm_i915_gem_request *req,
                         unsigned reset_counter,
                         bool interruptible,
                         s64 *timeout,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1089f0ff5ee3..3f56f50900bd 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1189,10 +1189,9 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
 }
 
 /**
- * __i915_wait_seqno - wait until execution of seqno has finished
- * @ring: the ring expected to report seqno
- * @seqno: duh!
- * @reset_counter: reset sequence associated with the given seqno
+ * __i915_wait_request - wait until execution of request has finished
+ * @req: duh!
+ * @reset_counter: reset sequence associated with the given request
  * @interruptible: do an interruptible wait (normally yes)
  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
  *
@@ -1203,15 +1202,16 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
  * reset_counter _must_ be read before, and an appropriate smp_rmb must be
  * inserted.
  *
- * Returns 0 if the seqno was found within the alloted time. Else returns the
+ * Returns 0 if the request was found within the alloted time. Else returns the
  * errno with remaining time filled in timeout argument.
  */
-int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
+int __i915_wait_request(struct drm_i915_gem_request *req,
                         unsigned reset_counter,
                         bool interruptible,
                         s64 *timeout,
                         struct drm_i915_file_private *file_priv)
 {
+        struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
         struct drm_device *dev = ring->dev;
         struct drm_i915_private *dev_priv = dev->dev_private;
         const bool irq_test_in_progress =
@@ -1223,7 +1223,8 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
 
         WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
 
-        if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
+        if (i915_seqno_passed(ring->get_seqno(ring, true),
+                              i915_gem_request_get_seqno(req)))
                 return 0;
 
         timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
@@ -1240,7 +1241,8 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
                 return -ENODEV;
 
         /* Record current time in case interrupted by signal, or wedged */
-        trace_i915_gem_request_wait_begin(ring, seqno);
+        trace_i915_gem_request_wait_begin(i915_gem_request_get_ring(req),
+                                          i915_gem_request_get_seqno(req));
         before = ktime_get_raw_ns();
         for (;;) {
                 struct timer_list timer;
@@ -1259,7 +1261,8 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
                         break;
                 }
 
-                if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
+                if (i915_seqno_passed(ring->get_seqno(ring, false),
+                                      i915_gem_request_get_seqno(req))) {
                         ret = 0;
                         break;
                 }
@@ -1291,7 +1294,8 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
                 }
         }
         now = ktime_get_raw_ns();
-        trace_i915_gem_request_wait_end(ring, seqno);
+        trace_i915_gem_request_wait_end(i915_gem_request_get_ring(req),
+                                        i915_gem_request_get_seqno(req));
 
         if (!irq_test_in_progress)
                 ring->irq_put(ring);
@@ -1338,8 +1342,8 @@ i915_wait_request(struct drm_i915_gem_request *req)
 
         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
         i915_gem_request_reference(req);
-        ret = __i915_wait_seqno(req->ring, i915_gem_request_get_seqno(req),
-                                reset_counter, interruptible, NULL, NULL);
+        ret = __i915_wait_request(req, reset_counter,
+                                  interruptible, NULL, NULL);
         i915_gem_request_unreference(req);
         return ret;
 }
@@ -1395,7 +1399,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
         struct drm_i915_gem_request *req;
         struct drm_device *dev = obj->base.dev;
         struct drm_i915_private *dev_priv = dev->dev_private;
-        struct intel_engine_cs *ring = obj->ring;
         unsigned reset_counter;
         int ret;
 
@@ -1417,8 +1420,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
         i915_gem_request_reference(req);
         mutex_unlock(&dev->struct_mutex);
-        ret = __i915_wait_seqno(ring, i915_gem_request_get_seqno(req),
-                                reset_counter, true, NULL, file_priv);
+        ret = __i915_wait_request(req, reset_counter, true, NULL, file_priv);
         mutex_lock(&dev->struct_mutex);
         i915_gem_request_unreference(req);
         if (ret)
@@ -2917,9 +2919,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
         struct drm_i915_gem_wait *args = data;
         struct drm_i915_gem_object *obj;
         struct drm_i915_gem_request *req;
-        struct intel_engine_cs *ring = NULL;
         unsigned reset_counter;
-        u32 seqno = 0;
         int ret = 0;
 
         if (args->flags != 0)
@@ -2944,9 +2944,6 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                 goto out;
 
         req = obj->last_read_req;
-        seqno = i915_gem_request_get_seqno(req);
-        WARN_ON(seqno == 0);
-        ring = obj->ring;
 
         /* Do this after OLR check to make sure we make forward progress polling
          * on this IOCTL with a timeout <=0 (like busy ioctl)
@@ -2961,8 +2958,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
         i915_gem_request_reference(req);
         mutex_unlock(&dev->struct_mutex);
 
-        ret = __i915_wait_seqno(ring, seqno, reset_counter, true, &args->timeout_ns,
-                                file->driver_priv);
+        ret = __i915_wait_request(req, reset_counter, true, &args->timeout_ns,
+                                  file->driver_priv);
         mutex_lock(&dev->struct_mutex);
         i915_gem_request_unreference(req);
         mutex_unlock(&dev->struct_mutex);
@@ -4127,9 +4124,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
         if (target == NULL)
                 return 0;
 
-        ret = __i915_wait_seqno(i915_gem_request_get_ring(target),
-                                i915_gem_request_get_seqno(target),
-                                reset_counter, true, NULL, NULL);
+        ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
         if (ret == 0)
                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 61c402485f36..0eaa1f48efcf 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -9618,10 +9618,9 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
 
         mmio_flip = &crtc->mmio_flip;
         if (mmio_flip->req)
-                WARN_ON(__i915_wait_seqno(i915_gem_request_get_ring(mmio_flip->req),
-                                          i915_gem_request_get_seqno(mmio_flip->req),
-                                          crtc->reset_counter,
-                                          false, NULL, NULL) != 0);
+                WARN_ON(__i915_wait_request(mmio_flip->req,
+                                            crtc->reset_counter,
+                                            false, NULL, NULL) != 0);
 
         intel_do_mmio_flip(crtc);
         if (mmio_flip->req) {