diff options
author | Dave Airlie <airlied@redhat.com> | 2014-01-13 19:46:19 -0500 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2014-01-13 19:46:19 -0500 |
commit | a095c60bd06f204c98527aafd5fda6ef42b53eb5 (patch) | |
tree | 5725ef3563751a7dab26d4b8c50694c239697911 /drivers/gpu | |
parent | 99d4a8ae93ead27b5a88cdbd09dc556fe96ac3a8 (diff) | |
parent | 52028704126b5597775cc788028385556af1f85c (diff) |
Merge tag 'ttm-next-2014-01-13' of git://people.freedesktop.org/~thomash/linux into drm-next
Some code cleanup by Rashika Kheria,
VM stuff for ttm:
-Use PFNMAP instead of MIXEDMAP where possible for performance
-Refuse to fault imported pages, an initial step to support dma-bufs
better from within TTM.
-Correctly set page mapping and -index members. These are needed in various
places in the vm subsystem that we are not using yet, but plan to use soonish:
For example unmap-mapping-range keeping COW pages, and dirty tracking
fbdefio style, but also for PCI memory.
ttm-next 2014-01-14 pull request
* tag 'ttm-next-2014-01-13' of git://people.freedesktop.org/~thomash/linux:
drivers: gpu: Remove unused function in ttm_lock.c
drivers: gpu: Mark function as static in ttm_bo_util.c
drivers: gpu: Mark function as static in ttm_bo.c
drm/ttm: Correctly set page mapping and -index members
drm/ttm: Refuse to fault (prime-) imported pages
drm/ttm: Use VM_PFNMAP for shared bo maps
Diffstat (limited to 'drivers/gpu')
-rw-r--r-- | drivers/gpu/drm/ttm/ttm_bo.c | 2 | ||||
-rw-r--r-- | drivers/gpu/drm/ttm/ttm_bo_util.c | 4 | ||||
-rw-r--r-- | drivers/gpu/drm/ttm/ttm_bo_vm.c | 32 | ||||
-rw-r--r-- | drivers/gpu/drm/ttm/ttm_lock.c | 8 | ||||
-rw-r--r-- | drivers/gpu/drm/ttm/ttm_tt.c | 27 |
5 files changed, 55 insertions, 18 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 07e02c4bf5a8..a06651309388 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -957,7 +957,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, | |||
957 | } | 957 | } |
958 | EXPORT_SYMBOL(ttm_bo_mem_space); | 958 | EXPORT_SYMBOL(ttm_bo_mem_space); |
959 | 959 | ||
960 | int ttm_bo_move_buffer(struct ttm_buffer_object *bo, | 960 | static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, |
961 | struct ttm_placement *placement, | 961 | struct ttm_placement *placement, |
962 | bool interruptible, | 962 | bool interruptible, |
963 | bool no_wait_gpu) | 963 | bool no_wait_gpu) |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 99aab8639089..145f54f17b85 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
@@ -187,7 +187,7 @@ void ttm_mem_io_free_vm(struct ttm_buffer_object *bo) | |||
187 | } | 187 | } |
188 | } | 188 | } |
189 | 189 | ||
190 | int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, | 190 | static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, |
191 | void **virtual) | 191 | void **virtual) |
192 | { | 192 | { |
193 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; | 193 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; |
@@ -219,7 +219,7 @@ int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, | |||
219 | return 0; | 219 | return 0; |
220 | } | 220 | } |
221 | 221 | ||
222 | void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, | 222 | static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, |
223 | void *virtual) | 223 | void *virtual) |
224 | { | 224 | { |
225 | struct ttm_mem_type_manager *man; | 225 | struct ttm_mem_type_manager *man; |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index b249ab9b1eb2..cfcdf5b5440a 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c | |||
@@ -132,6 +132,15 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
132 | return VM_FAULT_NOPAGE; | 132 | return VM_FAULT_NOPAGE; |
133 | } | 133 | } |
134 | 134 | ||
135 | /* | ||
136 | * Refuse to fault imported pages. This should be handled | ||
137 | * (if at all) by redirecting mmap to the exporter. | ||
138 | */ | ||
139 | if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) { | ||
140 | retval = VM_FAULT_SIGBUS; | ||
141 | goto out_unlock; | ||
142 | } | ||
143 | |||
135 | if (bdev->driver->fault_reserve_notify) { | 144 | if (bdev->driver->fault_reserve_notify) { |
136 | ret = bdev->driver->fault_reserve_notify(bo); | 145 | ret = bdev->driver->fault_reserve_notify(bo); |
137 | switch (ret) { | 146 | switch (ret) { |
@@ -217,10 +226,17 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
217 | } else if (unlikely(!page)) { | 226 | } else if (unlikely(!page)) { |
218 | break; | 227 | break; |
219 | } | 228 | } |
229 | page->mapping = vma->vm_file->f_mapping; | ||
230 | page->index = drm_vma_node_start(&bo->vma_node) + | ||
231 | page_offset; | ||
220 | pfn = page_to_pfn(page); | 232 | pfn = page_to_pfn(page); |
221 | } | 233 | } |
222 | 234 | ||
223 | ret = vm_insert_mixed(&cvma, address, pfn); | 235 | if (vma->vm_flags & VM_MIXEDMAP) |
236 | ret = vm_insert_mixed(&cvma, address, pfn); | ||
237 | else | ||
238 | ret = vm_insert_pfn(&cvma, address, pfn); | ||
239 | |||
224 | /* | 240 | /* |
225 | * Somebody beat us to this PTE or prefaulting to | 241 | * Somebody beat us to this PTE or prefaulting to |
226 | * an already populated PTE, or prefaulting error. | 242 | * an already populated PTE, or prefaulting error. |
@@ -250,6 +266,8 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma) | |||
250 | struct ttm_buffer_object *bo = | 266 | struct ttm_buffer_object *bo = |
251 | (struct ttm_buffer_object *)vma->vm_private_data; | 267 | (struct ttm_buffer_object *)vma->vm_private_data; |
252 | 268 | ||
269 | WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping); | ||
270 | |||
253 | (void)ttm_bo_reference(bo); | 271 | (void)ttm_bo_reference(bo); |
254 | } | 272 | } |
255 | 273 | ||
@@ -319,7 +337,14 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, | |||
319 | */ | 337 | */ |
320 | 338 | ||
321 | vma->vm_private_data = bo; | 339 | vma->vm_private_data = bo; |
322 | vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP; | 340 | |
341 | /* | ||
342 | * PFNMAP is faster than MIXEDMAP due to reduced page | ||
343 | * administration. So use MIXEDMAP only if private VMA, where | ||
344 | * we need to support COW. | ||
345 | */ | ||
346 | vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP; | ||
347 | vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; | ||
323 | return 0; | 348 | return 0; |
324 | out_unref: | 349 | out_unref: |
325 | ttm_bo_unref(&bo); | 350 | ttm_bo_unref(&bo); |
@@ -334,7 +359,8 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo) | |||
334 | 359 | ||
335 | vma->vm_ops = &ttm_bo_vm_ops; | 360 | vma->vm_ops = &ttm_bo_vm_ops; |
336 | vma->vm_private_data = ttm_bo_reference(bo); | 361 | vma->vm_private_data = ttm_bo_reference(bo); |
337 | vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND; | 362 | vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP; |
363 | vma->vm_flags |= VM_IO | VM_DONTEXPAND; | ||
338 | return 0; | 364 | return 0; |
339 | } | 365 | } |
340 | EXPORT_SYMBOL(ttm_fbdev_mmap); | 366 | EXPORT_SYMBOL(ttm_fbdev_mmap); |
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c index 3daa9a3930b8..6a954544727f 100644 --- a/drivers/gpu/drm/ttm/ttm_lock.c +++ b/drivers/gpu/drm/ttm/ttm_lock.c | |||
@@ -186,14 +186,6 @@ int ttm_write_lock(struct ttm_lock *lock, bool interruptible) | |||
186 | } | 186 | } |
187 | EXPORT_SYMBOL(ttm_write_lock); | 187 | EXPORT_SYMBOL(ttm_write_lock); |
188 | 188 | ||
189 | void ttm_write_lock_downgrade(struct ttm_lock *lock) | ||
190 | { | ||
191 | spin_lock(&lock->lock); | ||
192 | lock->rw = 1; | ||
193 | wake_up_all(&lock->queue); | ||
194 | spin_unlock(&lock->lock); | ||
195 | } | ||
196 | |||
197 | static int __ttm_vt_unlock(struct ttm_lock *lock) | 189 | static int __ttm_vt_unlock(struct ttm_lock *lock) |
198 | { | 190 | { |
199 | int ret = 0; | 191 | int ret = 0; |
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 210d50365162..9af99084b344 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c | |||
@@ -170,9 +170,8 @@ void ttm_tt_destroy(struct ttm_tt *ttm) | |||
170 | ttm_tt_unbind(ttm); | 170 | ttm_tt_unbind(ttm); |
171 | } | 171 | } |
172 | 172 | ||
173 | if (ttm->state == tt_unbound) { | 173 | if (ttm->state == tt_unbound) |
174 | ttm->bdev->driver->ttm_tt_unpopulate(ttm); | 174 | ttm_tt_unpopulate(ttm); |
175 | } | ||
176 | 175 | ||
177 | if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) && | 176 | if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) && |
178 | ttm->swap_storage) | 177 | ttm->swap_storage) |
@@ -362,7 +361,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage) | |||
362 | page_cache_release(to_page); | 361 | page_cache_release(to_page); |
363 | } | 362 | } |
364 | 363 | ||
365 | ttm->bdev->driver->ttm_tt_unpopulate(ttm); | 364 | ttm_tt_unpopulate(ttm); |
366 | ttm->swap_storage = swap_storage; | 365 | ttm->swap_storage = swap_storage; |
367 | ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED; | 366 | ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED; |
368 | if (persistent_swap_storage) | 367 | if (persistent_swap_storage) |
@@ -375,3 +374,23 @@ out_err: | |||
375 | 374 | ||
376 | return ret; | 375 | return ret; |
377 | } | 376 | } |
377 | |||
378 | static void ttm_tt_clear_mapping(struct ttm_tt *ttm) | ||
379 | { | ||
380 | pgoff_t i; | ||
381 | struct page **page = ttm->pages; | ||
382 | |||
383 | for (i = 0; i < ttm->num_pages; ++i) { | ||
384 | (*page)->mapping = NULL; | ||
385 | (*page++)->index = 0; | ||
386 | } | ||
387 | } | ||
388 | |||
389 | void ttm_tt_unpopulate(struct ttm_tt *ttm) | ||
390 | { | ||
391 | if (ttm->state == tt_unpopulated) | ||
392 | return; | ||
393 | |||
394 | ttm_tt_clear_mapping(ttm); | ||
395 | ttm->bdev->driver->ttm_tt_unpopulate(ttm); | ||
396 | } | ||