author      Thomas Hellstrom <thellstrom@vmware.com>    2013-10-09 06:18:07 -0400
committer   Thomas Hellstrom <thellstrom@vmware.com>    2013-11-06 07:14:43 -0500
commit      cbe12e74ee4e29b6cb4e63fa284e80b73ad57926
tree        ef667b3eca6206be60d338af822ab707365d1f74    /drivers/gpu/drm/ttm/ttm_bo_vm.c
parent      4695b03970df378dcb93fe3e7158381f1e980fa2
drm/ttm: Allow vm fault retries
Make use of the FAULT_FLAG_ALLOW_RETRY flag to allow dropping the
mmap_sem while waiting for bo idle.
FAULT_FLAG_ALLOW_RETRY appears to be primarily designed for disk waits,
but it should work just as well for GPU waits.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
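
For context, the mechanism the patch relies on works as follows: when the core mm sets FAULT_FLAG_ALLOW_RETRY while calling a driver's .fault handler, the handler may drop mmap_sem, perform its wait outside the lock, and return VM_FAULT_RETRY; the fault is then retried, by which time the buffer is normally idle. Below is a minimal, untested sketch of that general pattern using the 3.12-era .fault signature; my_device, my_dev_wait_idle() and my_dev_vm_fault() are hypothetical names used only for illustration and are not part of this patch.

/*
 * Illustrative sketch only -- not taken from the patch.  Shows the
 * generic FAULT_FLAG_ALLOW_RETRY pattern with the 3.12-era .fault
 * signature.  my_device and my_dev_wait_idle() are hypothetical.
 */
#include <linux/mm.h>
#include <linux/rwsem.h>

struct my_device;                            /* hypothetical device object */
int my_dev_wait_idle(struct my_device *dev); /* hypothetical wait helper */

static int my_dev_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct my_device *dev = vma->vm_private_data;

	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
		/*
		 * The caller tolerates a retry: avoid blocking with
		 * mmap_sem held.  With FAULT_FLAG_RETRY_NOWAIT we must
		 * not drop the semaphore; just report VM_FAULT_RETRY.
		 */
		if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
			up_read(&vma->vm_mm->mmap_sem);
			/* Wait with mmap_sem dropped; fault is retried. */
			(void) my_dev_wait_idle(dev);
		}
		return VM_FAULT_RETRY;
	}

	/* No retry allowed: wait synchronously with mmap_sem held. */
	if (my_dev_wait_idle(dev))
		return VM_FAULT_SIGBUS;

	/* ... map the page(s) here, e.g. with vm_insert_pfn() ... */
	return VM_FAULT_NOPAGE;
}

On VM_FAULT_RETRY the arch fault code re-takes mmap_sem and retries the fault, so the second pass usually finds the device already idle; ttm_bo_vm_fault_idle() in the diff below applies the same pattern around ttm_bo_wait().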
Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo_vm.c')
-rw-r--r--   drivers/gpu/drm/ttm/ttm_bo_vm.c   62
1 file changed, 50 insertions, 12 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 1006c15445e9..c03514b93f9c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -41,6 +41,51 @@
 
 #define TTM_BO_VM_NUM_PREFAULT 16
 
+static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
+				struct vm_area_struct *vma,
+				struct vm_fault *vmf)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	int ret = 0;
+
+	spin_lock(&bdev->fence_lock);
+	if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
+		goto out_unlock;
+
+	/*
+	 * Quick non-stalling check for idle.
+	 */
+	ret = ttm_bo_wait(bo, false, false, true);
+	if (likely(ret == 0))
+		goto out_unlock;
+
+	/*
+	 * If possible, avoid waiting for GPU with mmap_sem
+	 * held.
+	 */
+	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
+		ret = VM_FAULT_RETRY;
+		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
+			goto out_unlock;
+
+		up_read(&vma->vm_mm->mmap_sem);
+		(void) ttm_bo_wait(bo, false, true, false);
+		goto out_unlock;
+	}
+
+	/*
+	 * Ordinary wait.
+	 */
+	ret = ttm_bo_wait(bo, false, true, false);
+	if (unlikely(ret != 0))
+		ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
+			VM_FAULT_NOPAGE;
+
+out_unlock:
+	spin_unlock(&bdev->fence_lock);
+	return ret;
+}
+
 static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
@@ -91,18 +136,11 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 * Wait for buffer data in transit, due to a pipelined
 	 * move.
 	 */
-
-	spin_lock(&bdev->fence_lock);
-	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
-		ret = ttm_bo_wait(bo, false, true, false);
-		spin_unlock(&bdev->fence_lock);
-		if (unlikely(ret != 0)) {
-			retval = (ret != -ERESTARTSYS) ?
-				VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
-			goto out_unlock;
-		}
-	} else
-		spin_unlock(&bdev->fence_lock);
+	ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
+	if (unlikely(ret != 0)) {
+		retval = ret;
+		goto out_unlock;
+	}
 
 	ret = ttm_mem_io_lock(man, true);
 	if (unlikely(ret != 0)) {