author     Linus Torvalds <torvalds@linux-foundation.org>  2011-01-10 20:11:39 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-01-10 20:11:39 -0500
commit     5b2eef966cb2ae307aa4ef1767f7307774bc96ca (patch)
tree       095a251e145903598dd8d90d5b2eb880f0d6ff93 /drivers/gpu/drm/ttm
parent     8adbf8d46718a8f110de55ec82c40d04d0c362cc (diff)
parent     56bec7c009872ef33fe452ea75fecba481351b44 (diff)
Merge branch 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (390 commits)
drm/radeon/kms: disable underscan by default
drm/radeon/kms: only enable hdmi features if the monitor supports audio
drm: Restore the old_fb upon modeset failure
drm/nouveau: fix hwmon device binding
radeon: consolidate asic-specific function decls for pre-r600
vga_switcheroo: comparing too few characters in strncmp()
drm/radeon/kms: add NI pci ids
drm/radeon/kms: don't enable pcie gen2 on NI yet
drm/radeon/kms: add radeon_asic struct for NI asics
drm/radeon/kms/ni: load default sclk/mclk/vddc at pm init
drm/radeon/kms: add ucode loader for NI
drm/radeon/kms: add support for DCE5 display LUTs
drm/radeon/kms: add ni_reg.h
drm/radeon/kms: add bo blit support for NI
drm/radeon/kms: always use writeback/events for fences on NI
drm/radeon/kms: adjust default clock/vddc tracking for pm on DCE5
drm/radeon/kms: add backend map workaround for barts
drm/radeon/kms: fill gpu init for NI asics
drm/radeon/kms: add disabled vbios accessor for NI asics
drm/radeon/kms: handle NI thermal controller
...
Diffstat (limited to 'drivers/gpu/drm/ttm')

-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c            156
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c       138
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c          29
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c  169

4 files changed, 381 insertions(+), 111 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 934a96a78540..af61fc29e843 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -169,7 +169,7 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
 }
 EXPORT_SYMBOL(ttm_bo_wait_unreserved);
 
-static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
+void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 {
         struct ttm_bo_device *bdev = bo->bdev;
         struct ttm_mem_type_manager *man;
@@ -191,11 +191,7 @@ static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
         }
 }
 
-/**
- * Call with the lru_lock held.
- */
-
-static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 {
         int put_count = 0;
 
@@ -227,9 +223,18 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
         /**
          * Deadlock avoidance for multi-bo reserving.
          */
-        if (use_sequence && bo->seq_valid &&
-            (sequence - bo->val_seq < (1 << 31))) {
-                return -EAGAIN;
+        if (use_sequence && bo->seq_valid) {
+                /**
+                 * We've already reserved this one.
+                 */
+                if (unlikely(sequence == bo->val_seq))
+                        return -EDEADLK;
+                /**
+                 * Already reserved by a thread that will not back
+                 * off for us. We need to back off.
+                 */
+                if (unlikely(sequence - bo->val_seq < (1 << 31)))
+                        return -EAGAIN;
         }
 
         if (no_wait)
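The reworked check splits the old single test in two: an exact sequence match means the caller is trying to reserve a buffer it already holds (now reported as -EDEADLK), while the unsigned subtraction separates "held by an older ticket that will not back off" (-EAGAIN) from the opposite case. The `sequence - bo->val_seq < (1 << 31)` idiom is a wrap-safe ordering test on 32-bit counters. A minimal user-space sketch of that comparison (names are illustrative, not TTM API):

```c
#include <assert.h>
#include <stdint.h>

/* Wrap-safe test that sequence counter 'a' is at or after 'b', mirroring
 * the (sequence - bo->val_seq < (1 << 31)) check in the hunk above. */
static int seq_at_or_after(uint32_t a, uint32_t b)
{
	return (uint32_t)(a - b) < (1u << 31);
}

int main(void)
{
	assert(seq_at_or_after(12, 10));            /* plainly newer */
	assert(!seq_at_or_after(10, 12));           /* plainly older */
	assert(seq_at_or_after(2, 0xfffffffeu));    /* newer across the wrap */
	return 0;
}
```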
@@ -267,6 +272,13 @@ static void ttm_bo_ref_bug(struct kref *list_kref)
         BUG();
 }
 
+void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
+                         bool never_free)
+{
+        kref_sub(&bo->list_kref, count,
+                 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
+}
+
 int ttm_bo_reserve(struct ttm_buffer_object *bo,
                    bool interruptible,
                    bool no_wait, bool use_sequence, uint32_t sequence)
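ttm_bo_list_ref_sub() batches what used to be a `while (put_count--) kref_put(...)` loop into one kref_sub() call, dropping all the LRU list references in a single atomic operation; passing never_free selects ttm_bo_ref_bug() as the release function, which BUG()s, asserting that the count cannot reach zero on this path. A rough user-space analogue of the batching idea, using C11 atomics rather than the kernel's kref (illustrative only):

```c
#include <assert.h>
#include <stdatomic.h>

struct obj {
	atomic_int refs;
	int released;
};

static void obj_release(struct obj *o)
{
	o->released = 1;
}

/* Drop 'count' references in one atomic subtraction, as kref_sub() does,
 * instead of looping over single puts. */
static void obj_ref_sub(struct obj *o, int count, void (*release)(struct obj *))
{
	if (atomic_fetch_sub(&o->refs, count) == count)
		release(o);	/* the counter just hit zero */
}

int main(void)
{
	struct obj o = { 4, 0 };

	obj_ref_sub(&o, 3, obj_release);
	assert(o.released == 0);
	obj_ref_sub(&o, 1, obj_release);
	assert(o.released == 1);
	return 0;
}
```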
@@ -282,20 +294,24 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
                 put_count = ttm_bo_del_from_lru(bo);
         spin_unlock(&glob->lru_lock);
 
-        while (put_count--)
-                kref_put(&bo->list_kref, ttm_bo_ref_bug);
+        ttm_bo_list_ref_sub(bo, put_count, true);
 
         return ret;
 }
 
+void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
+{
+        ttm_bo_add_to_lru(bo);
+        atomic_set(&bo->reserved, 0);
+        wake_up_all(&bo->event_queue);
+}
+
 void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
         struct ttm_bo_global *glob = bo->glob;
 
         spin_lock(&glob->lru_lock);
-        ttm_bo_add_to_lru(bo);
-        atomic_set(&bo->reserved, 0);
-        wake_up_all(&bo->event_queue);
+        ttm_bo_unreserve_locked(bo);
         spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_bo_unreserve);
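Splitting ttm_bo_unreserve() into a public wrapper plus ttm_bo_unreserve_locked() follows the usual kernel `_locked` convention: the suffixed variant assumes the caller already holds the relevant lock (here glob->lru_lock), so paths that batch work under the lock, such as ttm_eu_fence_buffer_objects() later in this diff, can call it without a release/reacquire cycle. A generic pthread sketch of the pattern (hypothetical names, not TTM code):

```c
#include <pthread.h>

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold lru_lock; does the actual work. */
static void unreserve_locked(void)
{
	/* add back to the LRU, clear the reserved flag, wake waiters ... */
}

/* Public entry point: takes the lock around the _locked body. */
static void unreserve(void)
{
	pthread_mutex_lock(&lru_lock);
	unreserve_locked();
	pthread_mutex_unlock(&lru_lock);
}

int main(void)
{
	unreserve();
	return 0;
}
```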
@@ -362,8 +378,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
         int ret = 0;
 
         if (old_is_pci || new_is_pci ||
-            ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
-                ttm_bo_unmap_virtual(bo);
+            ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
+                ret = ttm_mem_io_lock(old_man, true);
+                if (unlikely(ret != 0))
+                        goto out_err;
+                ttm_bo_unmap_virtual_locked(bo);
+                ttm_mem_io_unlock(old_man);
+        }
 
         /*
          * Create and bind a ttm if required.
@@ -416,11 +437,9 @@ moved:
         }
 
         if (bo->mem.mm_node) {
-                spin_lock(&bo->lock);
                 bo->offset = (bo->mem.start << PAGE_SHIFT) +
                     bdev->man[bo->mem.mem_type].gpu_offset;
                 bo->cur_placement = bo->mem.placement;
-                spin_unlock(&bo->lock);
         } else
                 bo->offset = 0;
 
@@ -452,7 +471,6 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
                 ttm_tt_destroy(bo->ttm);
                 bo->ttm = NULL;
         }
-
         ttm_bo_mem_put(bo, &bo->mem);
 
         atomic_set(&bo->reserved, 0);
@@ -474,14 +492,14 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
         int put_count;
         int ret;
 
-        spin_lock(&bo->lock);
+        spin_lock(&bdev->fence_lock);
         (void) ttm_bo_wait(bo, false, false, true);
         if (!bo->sync_obj) {
 
                 spin_lock(&glob->lru_lock);
 
                 /**
-                 * Lock inversion between bo::reserve and bo::lock here,
+                 * Lock inversion between bo:reserve and bdev::fence_lock here,
                  * but that's OK, since we're only trylocking.
                  */
 
@@ -490,14 +508,13 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
                 if (unlikely(ret == -EBUSY))
                         goto queue;
 
-                spin_unlock(&bo->lock);
+                spin_unlock(&bdev->fence_lock);
                 put_count = ttm_bo_del_from_lru(bo);
 
                 spin_unlock(&glob->lru_lock);
                 ttm_bo_cleanup_memtype_use(bo);
 
-                while (put_count--)
-                        kref_put(&bo->list_kref, ttm_bo_ref_bug);
+                ttm_bo_list_ref_sub(bo, put_count, true);
 
                 return;
         } else {
@@ -512,7 +529,7 @@ queue:
         kref_get(&bo->list_kref);
         list_add_tail(&bo->ddestroy, &bdev->ddestroy);
         spin_unlock(&glob->lru_lock);
-        spin_unlock(&bo->lock);
+        spin_unlock(&bdev->fence_lock);
 
         if (sync_obj) {
                 driver->sync_obj_flush(sync_obj, sync_obj_arg);
@@ -537,14 +554,15 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
                                bool no_wait_reserve,
                                bool no_wait_gpu)
 {
+        struct ttm_bo_device *bdev = bo->bdev;
         struct ttm_bo_global *glob = bo->glob;
         int put_count;
         int ret = 0;
 
 retry:
-        spin_lock(&bo->lock);
+        spin_lock(&bdev->fence_lock);
         ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-        spin_unlock(&bo->lock);
+        spin_unlock(&bdev->fence_lock);
 
         if (unlikely(ret != 0))
                 return ret;
@@ -580,8 +598,7 @@ retry:
         spin_unlock(&glob->lru_lock);
         ttm_bo_cleanup_memtype_use(bo);
 
-        while (put_count--)
-                kref_put(&bo->list_kref, ttm_bo_ref_bug);
+        ttm_bo_list_ref_sub(bo, put_count, true);
 
         return 0;
 }
@@ -652,6 +669,7 @@ static void ttm_bo_release(struct kref *kref)
         struct ttm_buffer_object *bo =
             container_of(kref, struct ttm_buffer_object, kref);
         struct ttm_bo_device *bdev = bo->bdev;
+        struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 
         if (likely(bo->vm_node != NULL)) {
                 rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
@@ -659,6 +677,9 @@ static void ttm_bo_release(struct kref *kref)
                 bo->vm_node = NULL;
         }
         write_unlock(&bdev->vm_lock);
+        ttm_mem_io_lock(man, false);
+        ttm_mem_io_free_vm(bo);
+        ttm_mem_io_unlock(man);
         ttm_bo_cleanup_refs_or_queue(bo);
         kref_put(&bo->list_kref, ttm_bo_release_list);
         write_lock(&bdev->vm_lock);
@@ -698,9 +719,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
         struct ttm_placement placement;
         int ret = 0;
 
-        spin_lock(&bo->lock);
+        spin_lock(&bdev->fence_lock);
         ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-        spin_unlock(&bo->lock);
+        spin_unlock(&bdev->fence_lock);
 
         if (unlikely(ret != 0)) {
                 if (ret != -ERESTARTSYS) {
@@ -715,7 +736,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 
         evict_mem = bo->mem;
         evict_mem.mm_node = NULL;
-        evict_mem.bus.io_reserved = false;
+        evict_mem.bus.io_reserved_vm = false;
+        evict_mem.bus.io_reserved_count = 0;
 
         placement.fpfn = 0;
         placement.lpfn = 0;
@@ -802,8 +824,7 @@ retry:
 
         BUG_ON(ret != 0);
 
-        while (put_count--)
-                kref_put(&bo->list_kref, ttm_bo_ref_bug);
+        ttm_bo_list_ref_sub(bo, put_count, true);
 
         ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
         ttm_bo_unreserve(bo);
@@ -1036,6 +1057,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 {
         int ret = 0;
         struct ttm_mem_reg mem;
+        struct ttm_bo_device *bdev = bo->bdev;
 
         BUG_ON(!atomic_read(&bo->reserved));
 
@@ -1044,15 +1066,16 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
          * Have the driver move function wait for idle when necessary,
          * instead of doing it here.
          */
-        spin_lock(&bo->lock);
+        spin_lock(&bdev->fence_lock);
         ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-        spin_unlock(&bo->lock);
+        spin_unlock(&bdev->fence_lock);
         if (ret)
                 return ret;
         mem.num_pages = bo->num_pages;
         mem.size = mem.num_pages << PAGE_SHIFT;
         mem.page_alignment = bo->mem.page_alignment;
-        mem.bus.io_reserved = false;
+        mem.bus.io_reserved_vm = false;
+        mem.bus.io_reserved_count = 0;
         /*
          * Determine where to move the buffer.
          */
@@ -1163,7 +1186,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
         }
         bo->destroy = destroy;
 
-        spin_lock_init(&bo->lock);
         kref_init(&bo->kref);
         kref_init(&bo->list_kref);
         atomic_set(&bo->cpu_writers, 0);
@@ -1172,6 +1194,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
         INIT_LIST_HEAD(&bo->lru);
         INIT_LIST_HEAD(&bo->ddestroy);
         INIT_LIST_HEAD(&bo->swap);
+        INIT_LIST_HEAD(&bo->io_reserve_lru);
         bo->bdev = bdev;
         bo->glob = bdev->glob;
         bo->type = type;
@@ -1181,7 +1204,8 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
         bo->mem.num_pages = bo->num_pages;
         bo->mem.mm_node = NULL;
         bo->mem.page_alignment = page_alignment;
-        bo->mem.bus.io_reserved = false;
+        bo->mem.bus.io_reserved_vm = false;
+        bo->mem.bus.io_reserved_count = 0;
         bo->buffer_start = buffer_start & PAGE_MASK;
         bo->priv_flags = 0;
         bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
@@ -1355,6 +1379,10 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
         BUG_ON(type >= TTM_NUM_MEM_TYPES);
         man = &bdev->man[type];
         BUG_ON(man->has_type);
+        man->io_reserve_fastpath = true;
+        man->use_io_reserve_lru = false;
+        mutex_init(&man->io_reserve_mutex);
+        INIT_LIST_HEAD(&man->io_reserve_lru);
 
         ret = bdev->driver->init_mem_type(bdev, type, man);
         if (ret)
@@ -1526,7 +1554,8 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
         bdev->dev_mapping = NULL;
         bdev->glob = glob;
         bdev->need_dma32 = need_dma32;
-
+        bdev->val_seq = 0;
+        spin_lock_init(&bdev->fence_lock);
         mutex_lock(&glob->device_list_mutex);
         list_add_tail(&bdev->device_list, &glob->device_list);
         mutex_unlock(&glob->device_list_mutex);
@@ -1560,7 +1589,7 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
                 return true;
 }
 
-void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
 {
         struct ttm_bo_device *bdev = bo->bdev;
         loff_t offset = (loff_t) bo->addr_space_offset;
@@ -1569,8 +1598,20 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
         if (!bdev->dev_mapping)
                 return;
         unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
-        ttm_mem_io_free(bdev, &bo->mem);
+        ttm_mem_io_free_vm(bo);
+}
+
+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+{
+        struct ttm_bo_device *bdev = bo->bdev;
+        struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+
+        ttm_mem_io_lock(man, false);
+        ttm_bo_unmap_virtual_locked(bo);
+        ttm_mem_io_unlock(man);
 }
+
+
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
 static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
@@ -1650,6 +1691,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
                 bool lazy, bool interruptible, bool no_wait)
 {
         struct ttm_bo_driver *driver = bo->bdev->driver;
+        struct ttm_bo_device *bdev = bo->bdev;
         void *sync_obj;
         void *sync_obj_arg;
         int ret = 0;
@@ -1663,9 +1705,9 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
                         void *tmp_obj = bo->sync_obj;
                         bo->sync_obj = NULL;
                         clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-                        spin_unlock(&bo->lock);
+                        spin_unlock(&bdev->fence_lock);
                         driver->sync_obj_unref(&tmp_obj);
-                        spin_lock(&bo->lock);
+                        spin_lock(&bdev->fence_lock);
                         continue;
                 }
 
@@ -1674,29 +1716,29 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 
                 sync_obj = driver->sync_obj_ref(bo->sync_obj);
                 sync_obj_arg = bo->sync_obj_arg;
-                spin_unlock(&bo->lock);
+                spin_unlock(&bdev->fence_lock);
                 ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
                                             lazy, interruptible);
                 if (unlikely(ret != 0)) {
                         driver->sync_obj_unref(&sync_obj);
-                        spin_lock(&bo->lock);
+                        spin_lock(&bdev->fence_lock);
                         return ret;
                 }
-                spin_lock(&bo->lock);
+                spin_lock(&bdev->fence_lock);
                 if (likely(bo->sync_obj == sync_obj &&
                            bo->sync_obj_arg == sync_obj_arg)) {
                         void *tmp_obj = bo->sync_obj;
                         bo->sync_obj = NULL;
                         clear_bit(TTM_BO_PRIV_FLAG_MOVING,
                                   &bo->priv_flags);
-                        spin_unlock(&bo->lock);
+                        spin_unlock(&bdev->fence_lock);
                         driver->sync_obj_unref(&sync_obj);
                         driver->sync_obj_unref(&tmp_obj);
-                        spin_lock(&bo->lock);
+                        spin_lock(&bdev->fence_lock);
                 } else {
-                        spin_unlock(&bo->lock);
+                        spin_unlock(&bdev->fence_lock);
                         driver->sync_obj_unref(&sync_obj);
-                        spin_lock(&bo->lock);
+                        spin_lock(&bdev->fence_lock);
                 }
         }
         return 0;
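Throughout ttm_bo_wait() the per-buffer bo->lock becomes the device-wide bdev->fence_lock, but the shape of the loop is unchanged: take a reference to the fence under the spinlock, drop the lock across the potentially sleeping driver->sync_obj_wait(), then retake it and re-check that bo->sync_obj is still the object that was waited on before clearing it. A condensed user-space sketch of that drop-and-revalidate pattern (the fence type and wait call are stand-ins, not TTM API):

```c
#include <pthread.h>
#include <stddef.h>

struct dev { pthread_mutex_t fence_lock; };
struct bobj { struct dev *bdev; void *sync_obj; };

/* Stand-in for driver->sync_obj_wait(); may block, so it must run with
 * fence_lock dropped. */
static int fence_wait(void *fence)
{
	(void)fence;
	return 0;
}

/* Caller holds bdev->fence_lock, as ttm_bo_wait()'s callers do. */
static int bo_wait(struct bobj *bo)
{
	struct dev *bdev = bo->bdev;

	while (bo->sync_obj != NULL) {
		void *fence = bo->sync_obj;	/* real code takes a reference */
		int ret;

		pthread_mutex_unlock(&bdev->fence_lock);
		ret = fence_wait(fence);	/* may sleep */
		pthread_mutex_lock(&bdev->fence_lock);
		if (ret != 0)
			return ret;
		if (bo->sync_obj == fence)	/* nobody re-fenced it meanwhile */
			bo->sync_obj = NULL;
	}
	return 0;
}

int main(void)
{
	struct dev d = { PTHREAD_MUTEX_INITIALIZER };
	int dummy;
	struct bobj bo = { &d, &dummy };

	pthread_mutex_lock(&d.fence_lock);
	bo_wait(&bo);
	pthread_mutex_unlock(&d.fence_lock);
	return 0;
}
```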
@@ -1705,6 +1747,7 @@ EXPORT_SYMBOL(ttm_bo_wait);
 
 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
 {
+        struct ttm_bo_device *bdev = bo->bdev;
         int ret = 0;
 
         /*
@@ -1714,9 +1757,9 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
         ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
         if (unlikely(ret != 0))
                 return ret;
-        spin_lock(&bo->lock);
+        spin_lock(&bdev->fence_lock);
         ret = ttm_bo_wait(bo, false, true, no_wait);
-        spin_unlock(&bo->lock);
+        spin_unlock(&bdev->fence_lock);
         if (likely(ret == 0))
                 atomic_inc(&bo->cpu_writers);
         ttm_bo_unreserve(bo);
@@ -1782,16 +1825,15 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
         put_count = ttm_bo_del_from_lru(bo);
         spin_unlock(&glob->lru_lock);
 
-        while (put_count--)
-                kref_put(&bo->list_kref, ttm_bo_ref_bug);
+        ttm_bo_list_ref_sub(bo, put_count, true);
 
         /**
          * Wait for GPU, then move to system cached.
          */
 
-        spin_lock(&bo->lock);
+        spin_lock(&bo->bdev->fence_lock);
         ret = ttm_bo_wait(bo, false, false, false);
-        spin_unlock(&bo->lock);
+        spin_unlock(&bo->bdev->fence_lock);
 
         if (unlikely(ret != 0))
                 goto out;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 3106d5bcce32..77dbf408c0d0 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -75,37 +75,123 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
 
-int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
 {
-        int ret;
+        if (likely(man->io_reserve_fastpath))
+                return 0;
+
+        if (interruptible)
+                return mutex_lock_interruptible(&man->io_reserve_mutex);
+
+        mutex_lock(&man->io_reserve_mutex);
+        return 0;
+}
 
-        if (!mem->bus.io_reserved) {
-                mem->bus.io_reserved = true;
+void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
+{
+        if (likely(man->io_reserve_fastpath))
+                return;
+
+        mutex_unlock(&man->io_reserve_mutex);
+}
+
+static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
+{
+        struct ttm_buffer_object *bo;
+
+        if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
+                return -EAGAIN;
+
+        bo = list_first_entry(&man->io_reserve_lru,
+                              struct ttm_buffer_object,
+                              io_reserve_lru);
+        list_del_init(&bo->io_reserve_lru);
+        ttm_bo_unmap_virtual_locked(bo);
+
+        return 0;
+}
+
+static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+                              struct ttm_mem_reg *mem)
+{
+        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+        int ret = 0;
+
+        if (!bdev->driver->io_mem_reserve)
+                return 0;
+        if (likely(man->io_reserve_fastpath))
+                return bdev->driver->io_mem_reserve(bdev, mem);
+
+        if (bdev->driver->io_mem_reserve &&
+            mem->bus.io_reserved_count++ == 0) {
+retry:
                 ret = bdev->driver->io_mem_reserve(bdev, mem);
+                if (ret == -EAGAIN) {
+                        ret = ttm_mem_io_evict(man);
+                        if (ret == 0)
+                                goto retry;
+                }
+        }
+        return ret;
+}
+
+static void ttm_mem_io_free(struct ttm_bo_device *bdev,
+                            struct ttm_mem_reg *mem)
+{
+        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+
+        if (likely(man->io_reserve_fastpath))
+                return;
+
+        if (bdev->driver->io_mem_reserve &&
+            --mem->bus.io_reserved_count == 0 &&
+            bdev->driver->io_mem_free)
+                bdev->driver->io_mem_free(bdev, mem);
+
+}
+
+int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
+{
+        struct ttm_mem_reg *mem = &bo->mem;
+        int ret;
+
+        if (!mem->bus.io_reserved_vm) {
+                struct ttm_mem_type_manager *man =
+                        &bo->bdev->man[mem->mem_type];
+
+                ret = ttm_mem_io_reserve(bo->bdev, mem);
                 if (unlikely(ret != 0))
                         return ret;
+                mem->bus.io_reserved_vm = true;
+                if (man->use_io_reserve_lru)
+                        list_add_tail(&bo->io_reserve_lru,
+                                      &man->io_reserve_lru);
         }
         return 0;
 }
 
-void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
 {
-        if (bdev->driver->io_mem_reserve) {
-                if (mem->bus.io_reserved) {
-                        mem->bus.io_reserved = false;
-                        bdev->driver->io_mem_free(bdev, mem);
-                }
+        struct ttm_mem_reg *mem = &bo->mem;
+
+        if (mem->bus.io_reserved_vm) {
+                mem->bus.io_reserved_vm = false;
+                list_del_init(&bo->io_reserve_lru);
+                ttm_mem_io_free(bo->bdev, mem);
         }
 }
 
 int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                         void **virtual)
 {
+        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
         int ret;
         void *addr;
 
         *virtual = NULL;
+        (void) ttm_mem_io_lock(man, false);
         ret = ttm_mem_io_reserve(bdev, mem);
+        ttm_mem_io_unlock(man);
         if (ret || !mem->bus.is_iomem)
                 return ret;
 
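The single mem->bus.io_reserved flag is replaced by a refcount (io_reserved_count) plus an optional per-manager LRU of VM-mapped regions. When io_reserve_fastpath stays set (the default from ttm_bo_init_mm() above), ttm_mem_io_lock()/unlock() are no-ops and the driver hook is called straight through; otherwise reservations are counted under io_reserve_mutex, and an -EAGAIN from io_mem_reserve() makes ttm_mem_io_evict() unmap the least recently used buffer and retry. A toy user-space model of that count-and-evict slow path (the driver hooks and one-slot aperture are invented for illustration):

```c
#include <assert.h>

#define ERR_AGAIN (-11)	/* stand-in for -EAGAIN */

static int aperture_slots = 1;	/* pretend the BAR fits one mapping */

/* Invented driver hook: fails until an eviction frees a slot. */
static int drv_io_mem_reserve(void)
{
	if (aperture_slots == 0)
		return ERR_AGAIN;
	aperture_slots--;
	return 0;
}

/* Invented eviction: unmapping an LRU buffer frees its slot. */
static int evict_one_mapping(void)
{
	aperture_slots++;
	return 0;
}

struct mem_reg { int io_reserved_count; };

/* Core of the slow path above: only the first reference calls into the
 * driver, and ERR_AGAIN is answered by evicting and retrying. */
static int mem_io_reserve(struct mem_reg *mem)
{
	int ret = 0;

	if (mem->io_reserved_count++ == 0) {
retry:
		ret = drv_io_mem_reserve();
		if (ret == ERR_AGAIN && evict_one_mapping() == 0)
			goto retry;
	}
	return ret;
}

int main(void)
{
	struct mem_reg a = { 0 }, b = { 0 };

	assert(mem_io_reserve(&a) == 0);	/* takes the only slot */
	assert(mem_io_reserve(&b) == 0);	/* evicts, then succeeds */
	assert(mem_io_reserve(&b) == 0);	/* second ref: no driver call */
	return 0;
}
```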
@@ -117,7 +203,9 @@ int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
         else
                 addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
         if (!addr) {
+                (void) ttm_mem_io_lock(man, false);
                 ttm_mem_io_free(bdev, mem);
+                ttm_mem_io_unlock(man);
                 return -ENOMEM;
         }
 }
@@ -134,7 +222,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 
         if (virtual && mem->bus.addr == NULL)
                 iounmap(virtual);
+        (void) ttm_mem_io_lock(man, false);
         ttm_mem_io_free(bdev, mem);
+        ttm_mem_io_unlock(man);
 }
 
 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -231,7 +321,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
         struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
         struct ttm_tt *ttm = bo->ttm;
         struct ttm_mem_reg *old_mem = &bo->mem;
-        struct ttm_mem_reg old_copy = *old_mem;
+        struct ttm_mem_reg old_copy;
         void *old_iomap;
         void *new_iomap;
         int ret;
@@ -280,8 +370,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
         }
         mb();
 out2:
-        ttm_bo_free_old_node(bo);
-
+        old_copy = *old_mem;
         *old_mem = *new_mem;
         new_mem->mm_node = NULL;
 
@@ -292,9 +381,10 @@ out2:
         }
 
 out1:
-        ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
+        ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
 out:
         ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
+        ttm_bo_mem_put(bo, &old_copy);
         return ret;
 }
 EXPORT_SYMBOL(ttm_bo_move_memcpy);
@@ -337,11 +427,11 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
          * TODO: Explicit member copy would probably be better here.
          */
 
-        spin_lock_init(&fbo->lock);
         init_waitqueue_head(&fbo->event_queue);
         INIT_LIST_HEAD(&fbo->ddestroy);
         INIT_LIST_HEAD(&fbo->lru);
         INIT_LIST_HEAD(&fbo->swap);
+        INIT_LIST_HEAD(&fbo->io_reserve_lru);
         fbo->vm_node = NULL;
         atomic_set(&fbo->cpu_writers, 0);
 
@@ -453,6 +543,8 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
                 unsigned long start_page, unsigned long num_pages,
                 struct ttm_bo_kmap_obj *map)
 {
+        struct ttm_mem_type_manager *man =
+                &bo->bdev->man[bo->mem.mem_type];
         unsigned long offset, size;
         int ret;
 
@@ -467,7 +559,9 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
         if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
                 return -EPERM;
 #endif
+        (void) ttm_mem_io_lock(man, false);
         ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
+        ttm_mem_io_unlock(man);
         if (ret)
                 return ret;
         if (!bo->mem.bus.is_iomem) {
@@ -482,12 +576,15 @@ EXPORT_SYMBOL(ttm_bo_kmap);
 
 void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 {
+        struct ttm_buffer_object *bo = map->bo;
+        struct ttm_mem_type_manager *man =
+                &bo->bdev->man[bo->mem.mem_type];
+
         if (!map->virtual)
                 return;
         switch (map->bo_kmap_type) {
         case ttm_bo_map_iomap:
                 iounmap(map->virtual);
-                ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
                 break;
         case ttm_bo_map_vmap:
                 vunmap(map->virtual);
@@ -500,6 +597,9 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
         default:
                 BUG();
         }
+        (void) ttm_mem_io_lock(man, false);
+        ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
+        ttm_mem_io_unlock(man);
         map->virtual = NULL;
         map->page = NULL;
 }
@@ -520,7 +620,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
         struct ttm_buffer_object *ghost_obj;
         void *tmp_obj = NULL;
 
-        spin_lock(&bo->lock);
+        spin_lock(&bdev->fence_lock);
         if (bo->sync_obj) {
                 tmp_obj = bo->sync_obj;
                 bo->sync_obj = NULL;
@@ -529,7 +629,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
         bo->sync_obj_arg = sync_obj_arg;
         if (evict) {
                 ret = ttm_bo_wait(bo, false, false, false);
-                spin_unlock(&bo->lock);
+                spin_unlock(&bdev->fence_lock);
                 if (tmp_obj)
                         driver->sync_obj_unref(&tmp_obj);
                 if (ret)
@@ -552,7 +652,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
          */
 
         set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-        spin_unlock(&bo->lock);
+        spin_unlock(&bdev->fence_lock);
         if (tmp_obj)
                 driver->sync_obj_unref(&tmp_obj);
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index fe6cb77899f4..221b924acebe 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -83,6 +83,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         int i;
         unsigned long address = (unsigned long)vmf->virtual_address;
         int retval = VM_FAULT_NOPAGE;
+        struct ttm_mem_type_manager *man =
+                &bdev->man[bo->mem.mem_type];
 
         /*
          * Work around locking order reversal in fault / nopfn
@@ -118,24 +120,28 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
          * move.
          */
 
-        spin_lock(&bo->lock);
+        spin_lock(&bdev->fence_lock);
         if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
                 ret = ttm_bo_wait(bo, false, true, false);
-                spin_unlock(&bo->lock);
+                spin_unlock(&bdev->fence_lock);
                 if (unlikely(ret != 0)) {
                         retval = (ret != -ERESTARTSYS) ?
                             VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
                         goto out_unlock;
                 }
         } else
-                spin_unlock(&bo->lock);
+                spin_unlock(&bdev->fence_lock);
 
-
-        ret = ttm_mem_io_reserve(bdev, &bo->mem);
-        if (ret) {
-                retval = VM_FAULT_SIGBUS;
+        ret = ttm_mem_io_lock(man, true);
+        if (unlikely(ret != 0)) {
+                retval = VM_FAULT_NOPAGE;
                 goto out_unlock;
         }
+        ret = ttm_mem_io_reserve_vm(bo);
+        if (unlikely(ret != 0)) {
+                retval = VM_FAULT_SIGBUS;
+                goto out_io_unlock;
+        }
 
         page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
             bo->vm_node->start - vma->vm_pgoff;
@@ -144,7 +150,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
         if (unlikely(page_offset >= bo->num_pages)) {
                 retval = VM_FAULT_SIGBUS;
-                goto out_unlock;
+                goto out_io_unlock;
         }
 
         /*
@@ -182,7 +188,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                         page = ttm_tt_get_page(ttm, page_offset);
                         if (unlikely(!page && i == 0)) {
                                 retval = VM_FAULT_OOM;
-                                goto out_unlock;
+                                goto out_io_unlock;
                         } else if (unlikely(!page)) {
                                 break;
                         }
@@ -200,14 +206,15 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                 else if (unlikely(ret != 0)) {
                         retval =
                             (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
-                        goto out_unlock;
+                        goto out_io_unlock;
                 }
 
                 address += PAGE_SIZE;
                 if (unlikely(++page_offset >= page_last))
                         break;
         }
-
+out_io_unlock:
+        ttm_mem_io_unlock(man);
 out_unlock:
         ttm_bo_unreserve(bo);
         return retval;
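The fault handler now takes the manager's io-reserve lock before touching the aperture, so every failure past that point must unwind through the new out_io_unlock label before the existing out_unlock; the hunks above just retarget the gotos. A generic sketch of this layered-goto unwinding, with stub functions standing in for the TTM calls:

```c
#include <pthread.h>

static pthread_mutex_t io_lock = PTHREAD_MUTEX_INITIALIZER;

static int reserve_io(void)    { return 0; }	/* stubs standing in for  */
static int map_pages(void)     { return 0; }	/* the TTM calls in the   */
static int reserve_bo(void)    { return 0; }	/* fault path above       */
static void unreserve_bo(void) { }

static int fault(void)
{
	int retval;

	if (reserve_bo() != 0)
		return -1;

	pthread_mutex_lock(&io_lock);
	if (reserve_io() != 0) {
		retval = -1;
		goto out_io_unlock;	/* release in reverse order taken */
	}
	retval = map_pages();

out_io_unlock:
	pthread_mutex_unlock(&io_lock);
	unreserve_bo();
	return retval;
}

int main(void)
{
	return fault();
}
```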
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index c285c2902d15..3832fe10b4df 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -32,7 +32,7 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 
-void ttm_eu_backoff_reservation(struct list_head *list)
+static void ttm_eu_backoff_reservation_locked(struct list_head *list)
 {
         struct ttm_validate_buffer *entry;
 
@@ -41,10 +41,77 @@ void ttm_eu_backoff_reservation(struct list_head *list)
                 if (!entry->reserved)
                         continue;
 
+                if (entry->removed) {
+                        ttm_bo_add_to_lru(bo);
+                        entry->removed = false;
+
+                }
                 entry->reserved = false;
-                ttm_bo_unreserve(bo);
+                atomic_set(&bo->reserved, 0);
+                wake_up_all(&bo->event_queue);
+        }
+}
+
+static void ttm_eu_del_from_lru_locked(struct list_head *list)
+{
+        struct ttm_validate_buffer *entry;
+
+        list_for_each_entry(entry, list, head) {
+                struct ttm_buffer_object *bo = entry->bo;
+                if (!entry->reserved)
+                        continue;
+
+                if (!entry->removed) {
+                        entry->put_count = ttm_bo_del_from_lru(bo);
+                        entry->removed = true;
+                }
         }
 }
+
+static void ttm_eu_list_ref_sub(struct list_head *list)
+{
+        struct ttm_validate_buffer *entry;
+
+        list_for_each_entry(entry, list, head) {
+                struct ttm_buffer_object *bo = entry->bo;
+
+                if (entry->put_count) {
+                        ttm_bo_list_ref_sub(bo, entry->put_count, true);
+                        entry->put_count = 0;
+                }
+        }
+}
+
+static int ttm_eu_wait_unreserved_locked(struct list_head *list,
+                                         struct ttm_buffer_object *bo)
+{
+        struct ttm_bo_global *glob = bo->glob;
+        int ret;
+
+        ttm_eu_del_from_lru_locked(list);
+        spin_unlock(&glob->lru_lock);
+        ret = ttm_bo_wait_unreserved(bo, true);
+        spin_lock(&glob->lru_lock);
+        if (unlikely(ret != 0))
+                ttm_eu_backoff_reservation_locked(list);
+        return ret;
+}
+
+
+void ttm_eu_backoff_reservation(struct list_head *list)
+{
+        struct ttm_validate_buffer *entry;
+        struct ttm_bo_global *glob;
+
+        if (list_empty(list))
+                return;
+
+        entry = list_first_entry(list, struct ttm_validate_buffer, head);
+        glob = entry->bo->glob;
+        spin_lock(&glob->lru_lock);
+        ttm_eu_backoff_reservation_locked(list);
+        spin_unlock(&glob->lru_lock);
+}
 EXPORT_SYMBOL(ttm_eu_backoff_reservation);
 
 /*
@@ -59,37 +126,76 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
  * buffers in different orders.
  */
 
-int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
+int ttm_eu_reserve_buffers(struct list_head *list)
 {
+        struct ttm_bo_global *glob;
         struct ttm_validate_buffer *entry;
         int ret;
+        uint32_t val_seq;
+
+        if (list_empty(list))
+                return 0;
+
+        list_for_each_entry(entry, list, head) {
+                entry->reserved = false;
+                entry->put_count = 0;
+                entry->removed = false;
+        }
+
+        entry = list_first_entry(list, struct ttm_validate_buffer, head);
+        glob = entry->bo->glob;
 
 retry:
+        spin_lock(&glob->lru_lock);
+        val_seq = entry->bo->bdev->val_seq++;
+
         list_for_each_entry(entry, list, head) {
                 struct ttm_buffer_object *bo = entry->bo;
 
-                entry->reserved = false;
-                ret = ttm_bo_reserve(bo, true, false, true, val_seq);
-                if (ret != 0) {
-                        ttm_eu_backoff_reservation(list);
-                        if (ret == -EAGAIN) {
-                                ret = ttm_bo_wait_unreserved(bo, true);
-                                if (unlikely(ret != 0))
-                                        return ret;
-                                goto retry;
-                        } else
-                                return ret;
+retry_this_bo:
+                ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
+                switch (ret) {
+                case 0:
+                        break;
+                case -EBUSY:
+                        ret = ttm_eu_wait_unreserved_locked(list, bo);
+                        if (unlikely(ret != 0)) {
+                                spin_unlock(&glob->lru_lock);
+                                ttm_eu_list_ref_sub(list);
+                                return ret;
+                        }
+                        goto retry_this_bo;
+                case -EAGAIN:
+                        ttm_eu_backoff_reservation_locked(list);
+                        spin_unlock(&glob->lru_lock);
+                        ttm_eu_list_ref_sub(list);
+                        ret = ttm_bo_wait_unreserved(bo, true);
+                        if (unlikely(ret != 0))
+                                return ret;
+                        goto retry;
+                default:
+                        ttm_eu_backoff_reservation_locked(list);
+                        spin_unlock(&glob->lru_lock);
+                        ttm_eu_list_ref_sub(list);
+                        return ret;
                 }
 
                 entry->reserved = true;
                 if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-                        ttm_eu_backoff_reservation(list);
+                        ttm_eu_backoff_reservation_locked(list);
+                        spin_unlock(&glob->lru_lock);
+                        ttm_eu_list_ref_sub(list);
                         ret = ttm_bo_wait_cpu(bo, false);
                         if (ret)
                                 return ret;
                         goto retry;
                 }
         }
+
+        ttm_eu_del_from_lru_locked(list);
+        spin_unlock(&glob->lru_lock);
+        ttm_eu_list_ref_sub(list);
+
         return 0;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
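ttm_eu_reserve_buffers() now runs under glob->lru_lock and draws a fresh ticket from the device-wide bdev->val_seq on every pass, instead of having each driver supply one. The error dispatch is the heart of it: -EBUSY means the buffer is only transiently held, so just that buffer is waited on and retried, while -EAGAIN means it is held by a thread with an older ticket that will not yield, so the whole list is backed off and the outer retry starts over with a new ticket. A toy model of the ticket decision (heavily simplified; the real function also handles waiting and LRU bookkeeping):

```c
#include <assert.h>
#include <stdint.h>

#define ERR_BUSY  (-16)	/* stand-ins for -EBUSY / -EAGAIN */
#define ERR_AGAIN (-11)

struct bobj {
	int reserved;
	int seq_valid;
	uint32_t val_seq;
};

/* Simplified ttm_bo_reserve_locked(): take the buffer if free, otherwise
 * report whether the holder outranks us (older ticket => ERR_AGAIN). */
static int reserve_locked(struct bobj *bo, uint32_t seq)
{
	if (bo->reserved) {
		if (bo->seq_valid &&
		    (uint32_t)(seq - bo->val_seq) < (1u << 31))
			return ERR_AGAIN;	/* we are newer: back off */
		return ERR_BUSY;		/* transient: wait and retry */
	}
	bo->reserved = 1;
	bo->val_seq = seq;
	bo->seq_valid = 1;
	return 0;
}

int main(void)
{
	struct bobj bo = { 0, 0, 0 };

	assert(reserve_locked(&bo, 10) == 0);		/* free: taken      */
	assert(reserve_locked(&bo, 12) == ERR_AGAIN);	/* older holder     */
	bo.seq_valid = 0;
	assert(reserve_locked(&bo, 12) == ERR_BUSY);	/* no ticket known  */
	return 0;
}
```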
@@ -97,21 +203,36 @@ EXPORT_SYMBOL(ttm_eu_reserve_buffers);
 void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
 {
         struct ttm_validate_buffer *entry;
+        struct ttm_buffer_object *bo;
+        struct ttm_bo_global *glob;
+        struct ttm_bo_device *bdev;
+        struct ttm_bo_driver *driver;
 
-        list_for_each_entry(entry, list, head) {
-                struct ttm_buffer_object *bo = entry->bo;
-                struct ttm_bo_driver *driver = bo->bdev->driver;
-                void *old_sync_obj;
+        if (list_empty(list))
+                return;
+
+        bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
+        bdev = bo->bdev;
+        driver = bdev->driver;
+        glob = bo->glob;
 
-                spin_lock(&bo->lock);
-                old_sync_obj = bo->sync_obj;
+        spin_lock(&bdev->fence_lock);
+        spin_lock(&glob->lru_lock);
+
+        list_for_each_entry(entry, list, head) {
+                bo = entry->bo;
+                entry->old_sync_obj = bo->sync_obj;
                 bo->sync_obj = driver->sync_obj_ref(sync_obj);
                 bo->sync_obj_arg = entry->new_sync_obj_arg;
-                spin_unlock(&bo->lock);
-                ttm_bo_unreserve(bo);
+                ttm_bo_unreserve_locked(bo);
                 entry->reserved = false;
-                if (old_sync_obj)
-                        driver->sync_obj_unref(&old_sync_obj);
+        }
+        spin_unlock(&glob->lru_lock);
+        spin_unlock(&bdev->fence_lock);
+
+        list_for_each_entry(entry, list, head) {
+                if (entry->old_sync_obj)
+                        driver->sync_obj_unref(&entry->old_sync_obj);
         }
 }
 EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
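Fencing becomes a two-pass operation: with both bdev->fence_lock and glob->lru_lock held, every buffer gets the new fence attached and is unreserved via ttm_bo_unreserve_locked(); only after both locks are dropped are the displaced fences released, since driver->sync_obj_unref() may itself sleep or take locks. A small sketch of that swap-under-lock, release-after pattern (fences modelled as heap allocations purely for illustration):

```c
#include <pthread.h>
#include <stdlib.h>

struct entry {
	void *fence;		/* currently attached fence   */
	void *old_fence;	/* stashed for deferred unref */
};

static pthread_mutex_t fence_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for driver->sync_obj_unref(); may sleep in a real driver,
 * hence it must run outside the critical section. */
static void fence_unref(void **f)
{
	free(*f);
	*f = NULL;
}

static void fence_entries(struct entry *e, size_t n, void *new_fence)
{
	size_t i;

	pthread_mutex_lock(&fence_lock);
	for (i = 0; i < n; i++) {	/* pass 1: publish the new fence */
		e[i].old_fence = e[i].fence;
		e[i].fence = new_fence;
	}
	pthread_mutex_unlock(&fence_lock);

	for (i = 0; i < n; i++)		/* pass 2: drop old fences unlocked */
		if (e[i].old_fence)
			fence_unref(&e[i].old_fence);
}

int main(void)
{
	struct entry e[2] = { { malloc(8), NULL }, { NULL, NULL } };

	fence_entries(e, 2, NULL);	/* detach everything */
	return 0;
}
```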