Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo_vm.c')
 drivers/gpu/drm/ttm/ttm_bo_vm.c | 92 ++++++++++++++++++++++++++++-------------
 1 file changed, 63 insertions(+), 29 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 1006c15445e9..ac617f3ecd0c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -41,6 +41,51 @@
 
 #define TTM_BO_VM_NUM_PREFAULT 16
 
+static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
+				struct vm_area_struct *vma,
+				struct vm_fault *vmf)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	int ret = 0;
+
+	spin_lock(&bdev->fence_lock);
+	if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
+		goto out_unlock;
+
+	/*
+	 * Quick non-stalling check for idle.
+	 */
+	ret = ttm_bo_wait(bo, false, false, true);
+	if (likely(ret == 0))
+		goto out_unlock;
+
+	/*
+	 * If possible, avoid waiting for GPU with mmap_sem
+	 * held.
+	 */
+	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
+		ret = VM_FAULT_RETRY;
+		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
+			goto out_unlock;
+
+		up_read(&vma->vm_mm->mmap_sem);
+		(void) ttm_bo_wait(bo, false, true, false);
+		goto out_unlock;
+	}
+
+	/*
+	 * Ordinary wait.
+	 */
+	ret = ttm_bo_wait(bo, false, true, false);
+	if (unlikely(ret != 0))
+		ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
+		    VM_FAULT_NOPAGE;
+
+out_unlock:
+	spin_unlock(&bdev->fence_lock);
+	return ret;
+}
+
 static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
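
The new helper isolates one rule: never sleep waiting for the GPU while mmap_sem is held if the core fault code allows a retry. Outside of TTM, the same pattern looks roughly like the sketch below; the mydrv_* object and wait calls are hypothetical stand-ins, while the FAULT_FLAG_*/VM_FAULT_* handling and mmap_sem usage match kernels of this era.

	#include <linux/mm.h>

	struct mydrv_object;					/* hypothetical */
	bool mydrv_object_is_busy(struct mydrv_object *obj);	/* hypothetical */
	int mydrv_object_wait(struct mydrv_object *obj);	/* hypothetical, interruptible */

	static int mydrv_fault_wait(struct mydrv_object *obj,
				    struct vm_area_struct *vma,
				    struct vm_fault *vmf)
	{
		int ret;

		/* Fast path: nothing in flight, nothing to wait for. */
		if (!mydrv_object_is_busy(obj))
			return 0;

		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
			/*
			 * The core will re-issue the fault, so we may drop
			 * mmap_sem and do the slow wait without it, unless
			 * the caller asked for a non-blocking attempt.
			 */
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				up_read(&vma->vm_mm->mmap_sem);
				(void) mydrv_object_wait(obj);
			}
			return VM_FAULT_RETRY;
		}

		/* No retry allowed: ordinary wait under mmap_sem. */
		ret = mydrv_object_wait(obj);
		if (unlikely(ret != 0))
			return (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
				VM_FAULT_NOPAGE;
		return 0;
	}

Like ttm_bo_vm_fault_idle() above, the sketch returns 0 or a VM_FAULT_* code directly, never a raw -errno, so callers can hand the value straight back to the fault path.
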
@@ -57,6 +102,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	int retval = VM_FAULT_NOPAGE;
 	struct ttm_mem_type_manager *man =
 		&bdev->man[bo->mem.mem_type];
+	struct vm_area_struct cvma;
 
 	/*
 	 * Work around locking order reversal in fault / nopfn
@@ -91,18 +137,11 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
91 | * Wait for buffer data in transit, due to a pipelined | 137 | * Wait for buffer data in transit, due to a pipelined |
92 | * move. | 138 | * move. |
93 | */ | 139 | */ |
94 | 140 | ret = ttm_bo_vm_fault_idle(bo, vma, vmf); | |
95 | spin_lock(&bdev->fence_lock); | 141 | if (unlikely(ret != 0)) { |
96 | if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) { | 142 | retval = ret; |
97 | ret = ttm_bo_wait(bo, false, true, false); | 143 | goto out_unlock; |
98 | spin_unlock(&bdev->fence_lock); | 144 | } |
99 | if (unlikely(ret != 0)) { | ||
100 | retval = (ret != -ERESTARTSYS) ? | ||
101 | VM_FAULT_SIGBUS : VM_FAULT_NOPAGE; | ||
102 | goto out_unlock; | ||
103 | } | ||
104 | } else | ||
105 | spin_unlock(&bdev->fence_lock); | ||
106 | 145 | ||
107 | ret = ttm_mem_io_lock(man, true); | 146 | ret = ttm_mem_io_lock(man, true); |
108 | if (unlikely(ret != 0)) { | 147 | if (unlikely(ret != 0)) { |
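
With the wait factored out, the call site shrinks to the five lines above. Note that retval = ret is only correct because ttm_bo_vm_fault_idle() already maps every failure to VM_FAULT_RETRY, VM_FAULT_SIGBUS or VM_FAULT_NOPAGE rather than returning an -errno.
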
@@ -126,26 +165,21 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 
 	/*
-	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
-	 * since the mmap_sem is only held in read mode. However, we
-	 * modify only the caching bits of vma->vm_page_prot and
-	 * consider those bits protected by
-	 * the bo->mutex, as we should be the only writers.
-	 * There shouldn't really be any readers of these bits except
-	 * within vm_insert_mixed()? fork?
-	 *
-	 * TODO: Add a list of vmas to the bo, and change the
-	 * vma->vm_page_prot when the object changes caching policy, with
-	 * the correct locks held.
+	 * Make a local vma copy to modify the page_prot member
+	 * and vm_flags if necessary. The vma parameter is protected
+	 * by mmap_sem in write mode.
 	 */
+	cvma = *vma;
+	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
+
 	if (bo->mem.bus.is_iomem) {
-		vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
-						vma->vm_page_prot);
+		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
+						cvma.vm_page_prot);
 	} else {
 		ttm = bo->ttm;
-		vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
-		    vm_get_page_prot(vma->vm_flags) :
-		    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
+		if (!(bo->mem.placement & TTM_PL_FLAG_CACHED))
+			cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
+							cvma.vm_page_prot);
 
 		/* Allocate all page at once, most common usage */
 		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
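
Since the fault handler runs with mmap_sem held only for read, it must not write to the shared vma->vm_page_prot; the hunk above therefore computes the protection in an on-stack copy that vm_insert_mixed() merely reads. A minimal sketch of the technique in isolation; mydrv_insert_pfn() and the needs_uncached flag are illustrative, not from the patch:

	#include <linux/mm.h>

	static int mydrv_insert_pfn(struct vm_area_struct *vma,
				    unsigned long address, unsigned long pfn,
				    bool needs_uncached)
	{
		struct vm_area_struct cvma = *vma;	/* private on-stack copy */

		/* Recompute the baseline protection from vm_flags... */
		cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
		/* ...and adjust only the caching attributes, only in the copy. */
		if (needs_uncached)
			cvma.vm_page_prot = pgprot_noncached(cvma.vm_page_prot);

		/* vm_insert_mixed() only reads vm_page_prot from the vma. */
		return vm_insert_mixed(&cvma, address, pfn);
	}

This replaces the old approach of patching the shared vma->vm_page_prot in place and relying on bo->mutex to serialize writers, a scheme the deleted comment itself marked as questionable.
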
@@ -172,7 +206,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 			pfn = page_to_pfn(page);
 		}
 
-		ret = vm_insert_mixed(vma, address, pfn);
+		ret = vm_insert_mixed(&cvma, address, pfn);
 		/*
 		 * Somebody beat us to this PTE or prefaulting to
 		 * an already populated PTE, or prefaulting error.
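
The retained comment describes the usual prefault idiom: of the TTM_BO_VM_NUM_PREFAULT insertions, only the first is the page actually faulted on, so a racing fault that already populated a PTE (vm_insert_mixed() returning -EBUSY) simply ends the speculative loop early. A hedged sketch of that idiom follows; the pfn stepping and error mapping are illustrative, not quoted from the file:

	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		ret = vm_insert_mixed(&cvma, address, pfn);
		/* Raced populate, or any error on a speculative page: stop early. */
		if (ret == -EBUSY || (ret != 0 && i > 0))
			break;
		/* A failure on the faulting page itself is fatal. */
		if (ret != 0)
			return (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
		address += PAGE_SIZE;
		pfn++;	/* illustrative only; TTM recomputes the pfn per page */
	}
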