about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorRob Clark <robdclark@gmail.com>2013-09-01 13:25:09 -0400
committerRob Clark <robdclark@gmail.com>2013-09-10 13:56:58 -0400
commitbf6811f304795e7697985449ee870b29a8cbc6c7 (patch)
tree23e844ef2eb4887d6d8f939fc056ffa54b1cbcd7
parent86a7e1224a68511d3a1ae0b7e11581b9d37723ae (diff)
drm/msm: handle read vs write fences
The userspace API already had everything needed to handle read vs. write synchronization. This patch actually bothers to hook it up properly, so that we don't need to (for example) stall on userspace read access to a buffer that the gpu is also still reading.

Signed-off-by: Rob Clark <robdclark@gmail.com>
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h2
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c25
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h2
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c9
4 files changed, 27 insertions, 11 deletions
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 80d75094bf0a..1ea9d46e01bc 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -153,7 +153,7 @@ void *msm_gem_vaddr(struct drm_gem_object *obj);
153int msm_gem_queue_inactive_work(struct drm_gem_object *obj, 153int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
154 struct work_struct *work); 154 struct work_struct *work);
155void msm_gem_move_to_active(struct drm_gem_object *obj, 155void msm_gem_move_to_active(struct drm_gem_object *obj,
156 struct msm_gpu *gpu, uint32_t fence); 156 struct msm_gpu *gpu, bool write, uint32_t fence);
157void msm_gem_move_to_inactive(struct drm_gem_object *obj); 157void msm_gem_move_to_inactive(struct drm_gem_object *obj);
158int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, 158int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
159 struct timespec *timeout); 159 struct timespec *timeout);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 6b5a6c8c7658..df0390f5ec58 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -393,11 +393,14 @@ int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
393} 393}
394 394
395void msm_gem_move_to_active(struct drm_gem_object *obj, 395void msm_gem_move_to_active(struct drm_gem_object *obj,
396 struct msm_gpu *gpu, uint32_t fence) 396 struct msm_gpu *gpu, bool write, uint32_t fence)
397{ 397{
398 struct msm_gem_object *msm_obj = to_msm_bo(obj); 398 struct msm_gem_object *msm_obj = to_msm_bo(obj);
399 msm_obj->gpu = gpu; 399 msm_obj->gpu = gpu;
400 msm_obj->fence = fence; 400 if (write)
401 msm_obj->write_fence = fence;
402 else
403 msm_obj->read_fence = fence;
401 list_del_init(&msm_obj->mm_list); 404 list_del_init(&msm_obj->mm_list);
402 list_add_tail(&msm_obj->mm_list, &gpu->active_list); 405 list_add_tail(&msm_obj->mm_list, &gpu->active_list);
403} 406}
@@ -411,7 +414,8 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj)
411 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 414 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
412 415
413 msm_obj->gpu = NULL; 416 msm_obj->gpu = NULL;
414 msm_obj->fence = 0; 417 msm_obj->read_fence = 0;
418 msm_obj->write_fence = 0;
415 list_del_init(&msm_obj->mm_list); 419 list_del_init(&msm_obj->mm_list);
416 list_add_tail(&msm_obj->mm_list, &priv->inactive_list); 420 list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
417 421
@@ -433,8 +437,14 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
433 struct msm_gem_object *msm_obj = to_msm_bo(obj); 437 struct msm_gem_object *msm_obj = to_msm_bo(obj);
434 int ret = 0; 438 int ret = 0;
435 439
436 if (is_active(msm_obj) && !(op & MSM_PREP_NOSYNC)) 440 if (is_active(msm_obj) && !(op & MSM_PREP_NOSYNC)) {
437 ret = msm_wait_fence_interruptable(dev, msm_obj->fence, timeout); 441 uint32_t fence = 0;
442 if (op & MSM_PREP_READ)
443 fence = msm_obj->write_fence;
444 if (op & MSM_PREP_WRITE)
445 fence = max(fence, msm_obj->read_fence);
446 ret = msm_wait_fence_interruptable(dev, fence, timeout);
447 }
438 448
439 /* TODO cache maintenance */ 449 /* TODO cache maintenance */
440 450
@@ -455,9 +465,10 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
455 uint64_t off = drm_vma_node_start(&obj->vma_node); 465 uint64_t off = drm_vma_node_start(&obj->vma_node);
456 466
457 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 467 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
458 seq_printf(m, "%08x: %c(%d) %2d (%2d) %08llx %p %d\n", 468 seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %d\n",
459 msm_obj->flags, is_active(msm_obj) ? 'A' : 'I', 469 msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
460 msm_obj->fence, obj->name, obj->refcount.refcount.counter, 470 msm_obj->read_fence, msm_obj->write_fence,
471 obj->name, obj->refcount.refcount.counter,
461 off, msm_obj->vaddr, obj->size); 472 off, msm_obj->vaddr, obj->size);
462} 473}
463 474
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index d746f13d283c..0676f32e2c6a 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -36,7 +36,7 @@ struct msm_gem_object {
36 */ 36 */
37 struct list_head mm_list; 37 struct list_head mm_list;
38 struct msm_gpu *gpu; /* non-null if active */ 38 struct msm_gpu *gpu; /* non-null if active */
39 uint32_t fence; 39 uint32_t read_fence, write_fence;
40 40
41 /* Transiently in the process of submit ioctl, objects associated 41 /* Transiently in the process of submit ioctl, objects associated
42 * with the submit are on submit->bo_list.. this only lasts for 42 * with the submit are on submit->bo_list.. this only lasts for
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index e1e1ec9321ff..cb9cdffdc41f 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -265,7 +265,8 @@ static void retire_worker(struct work_struct *work)
265 obj = list_first_entry(&gpu->active_list, 265 obj = list_first_entry(&gpu->active_list,
266 struct msm_gem_object, mm_list); 266 struct msm_gem_object, mm_list);
267 267
268 if (obj->fence <= fence) { 268 if ((obj->read_fence <= fence) &&
269 (obj->write_fence <= fence)) {
269 /* move to inactive: */ 270 /* move to inactive: */
270 msm_gem_move_to_inactive(&obj->base); 271 msm_gem_move_to_inactive(&obj->base);
271 msm_gem_put_iova(&obj->base, gpu->id); 272 msm_gem_put_iova(&obj->base, gpu->id);
@@ -321,7 +322,11 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
321 submit->gpu->id, &iova); 322 submit->gpu->id, &iova);
322 } 323 }
323 324
324 msm_gem_move_to_active(&msm_obj->base, gpu, submit->fence); 325 if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
326 msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
327
328 if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
329 msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
325 } 330 }
326 hangcheck_timer_reset(gpu); 331 hangcheck_timer_reset(gpu);
327 mutex_unlock(&dev->struct_mutex); 332 mutex_unlock(&dev->struct_mutex);