Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo.c')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c	44
1 file changed, 36 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 25e4c2a1d1d8..cf2ec562550e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -378,8 +378,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 	int ret = 0;
 
 	if (old_is_pci || new_is_pci ||
-	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
-		ttm_bo_unmap_virtual(bo);
+	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
+		ret = ttm_mem_io_lock(old_man, true);
+		if (unlikely(ret != 0))
+			goto out_err;
+		ttm_bo_unmap_virtual_locked(bo);
+		ttm_mem_io_unlock(old_man);
+	}
 
 	/*
 	 * Create and bind a ttm if required.
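The hunk above replaces the plain ttm_bo_unmap_virtual() call with an unmap done under the memory-type manager's new io_reserve mutex. A minimal sketch of that pairing, written as a hypothetical helper (example_unmap_before_move() and its error handling are illustrative, not part of the patch):

/* Hypothetical helper showing the lock/unmap/unlock pairing from the hunk above. */
static int example_unmap_before_move(struct ttm_buffer_object *bo,
				     struct ttm_mem_type_manager *old_man)
{
	int ret;

	/* Interruptible lock of the old memory type's io_reserve state. */
	ret = ttm_mem_io_lock(old_man, true);
	if (unlikely(ret != 0))
		return ret;	/* e.g. a signal; the caller can restart the move */

	/* CPU mappings are torn down while the io_reserve mutex is held. */
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(old_man);
	return 0;
}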
@@ -466,7 +471,6 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 		ttm_tt_destroy(bo->ttm);
 		bo->ttm = NULL;
 	}
-
 	ttm_bo_mem_put(bo, &bo->mem);
 
 	atomic_set(&bo->reserved, 0);
@@ -665,6 +669,7 @@ static void ttm_bo_release(struct kref *kref)
 	struct ttm_buffer_object *bo =
 	    container_of(kref, struct ttm_buffer_object, kref);
 	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 
 	if (likely(bo->vm_node != NULL)) {
 		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
@@ -672,6 +677,9 @@ static void ttm_bo_release(struct kref *kref)
 		bo->vm_node = NULL;
 	}
 	write_unlock(&bdev->vm_lock);
+	ttm_mem_io_lock(man, false);
+	ttm_mem_io_free_vm(bo);
+	ttm_mem_io_unlock(man);
 	ttm_bo_cleanup_refs_or_queue(bo);
 	kref_put(&bo->list_kref, ttm_bo_release_list);
 	write_lock(&bdev->vm_lock);
@@ -728,7 +736,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 
 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;
-	evict_mem.bus.io_reserved = false;
+	evict_mem.bus.io_reserved_vm = false;
+	evict_mem.bus.io_reserved_count = 0;
 
 	placement.fpfn = 0;
 	placement.lpfn = 0;
@@ -1065,7 +1074,8 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
 	mem.page_alignment = bo->mem.page_alignment;
-	mem.bus.io_reserved = false;
+	mem.bus.io_reserved_vm = false;
+	mem.bus.io_reserved_count = 0;
 	/*
 	 * Determine where to move the buffer.
 	 */
@@ -1184,6 +1194,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	INIT_LIST_HEAD(&bo->lru);
 	INIT_LIST_HEAD(&bo->ddestroy);
 	INIT_LIST_HEAD(&bo->swap);
+	INIT_LIST_HEAD(&bo->io_reserve_lru);
 	bo->bdev = bdev;
 	bo->glob = bdev->glob;
 	bo->type = type;
@@ -1193,7 +1204,8 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	bo->mem.num_pages = bo->num_pages;
 	bo->mem.mm_node = NULL;
 	bo->mem.page_alignment = page_alignment;
-	bo->mem.bus.io_reserved = false;
+	bo->mem.bus.io_reserved_vm = false;
+	bo->mem.bus.io_reserved_count = 0;
 	bo->buffer_start = buffer_start & PAGE_MASK;
 	bo->priv_flags = 0;
 	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
@@ -1367,6 +1379,10 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 	BUG_ON(type >= TTM_NUM_MEM_TYPES);
 	man = &bdev->man[type];
 	BUG_ON(man->has_type);
+	man->io_reserve_fastpath = true;
+	man->use_io_reserve_lru = false;
+	mutex_init(&man->io_reserve_mutex);
+	INIT_LIST_HEAD(&man->io_reserve_lru);
 
 	ret = bdev->driver->init_mem_type(bdev, type, man);
 	if (ret)
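ttm_bo_init_mm() now defaults every memory type to the io_reserve fastpath with the reservation LRU disabled. A hedged sketch of how a driver might opt in from its init_mem_type() hook; example_init_mem_type() is hypothetical, only the two manager fields come from this patch:

/* Hypothetical driver hook: opt one memory type in to LRU-managed io reservations. */
static int example_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				 struct ttm_mem_type_manager *man)
{
	if (type == TTM_PL_VRAM) {
		/* Take the slow path so io reservations can be evicted via the LRU. */
		man->io_reserve_fastpath = false;
		man->use_io_reserve_lru = true;
	}
	/* ... the usual flags, caching and gpu_offset setup would go here ... */
	return 0;
}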
@@ -1574,7 +1590,7 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 	return true;
 }
 
-void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	loff_t offset = (loff_t) bo->addr_space_offset;
@@ -1583,8 +1599,20 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 	if (!bdev->dev_mapping)
 		return;
 	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
-	ttm_mem_io_free(bdev, &bo->mem);
+	ttm_mem_io_free_vm(bo);
 }
+
+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+
+	ttm_mem_io_lock(man, false);
+	ttm_bo_unmap_virtual_locked(bo);
+	ttm_mem_io_unlock(man);
+}
+
+
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
 static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
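With the rename, ttm_bo_unmap_virtual() becomes a locking wrapper around ttm_bo_unmap_virtual_locked(). A rough sketch of how the two variants divide up between callers; both example_* functions are hypothetical illustrations, not code from the patch:

/* Hypothetical unlocked caller: the wrapper takes the io_reserve mutex itself. */
static void example_unmap_from_driver(struct ttm_buffer_object *bo)
{
	ttm_bo_unmap_virtual(bo);
}

/* Hypothetical locked caller: hold the mutex across the unmap and related io teardown. */
static void example_unmap_while_locked(struct ttm_buffer_object *bo,
				       struct ttm_mem_type_manager *man)
{
	ttm_mem_io_lock(man, false);	/* non-interruptible, as in ttm_bo_release() above */
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_free_vm(bo);
	ttm_mem_io_unlock(man);
}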