author    Gerd Hoffmann <kraxel@redhat.com>  2019-08-05 10:01:10 -0400
committer Gerd Hoffmann <kraxel@redhat.com>  2019-08-06 02:21:54 -0400
commit    b96f3e7c8069b749a40ca3a33c97835d57dd45d2 (patch)
tree      a695e9c84d685b9c7d72dd7c3fe039c79c2830ed
parent    1e053b10ba60eae6a3f9de64cbc74bdf6cb0e715 (diff)
drm/ttm: use gem vma_node
Drop vma_node from ttm_buffer_object, use the gem struct (base.vma_node) instead.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20190805140119.7337-9-kraxel@redhat.com
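Note (not part of the patch): the conversion is mechanical; callers that used the
TTM-private bo->vma_node now go through the drm_gem_object embedded in
ttm_buffer_object. A minimal, purely illustrative sketch of the resulting pattern,
with a made-up helper name:

    #include <drm/drm_vma_manager.h>
    #include <drm/ttm/ttm_bo_api.h>

    /* Hypothetical example mirroring the drivers converted below: the
     * drm_vma_offset_node now lives in the embedded GEM object (bo->base),
     * so the mmap offset is read from base.vma_node.
     */
    static inline u64 example_bo_mmap_offset(struct ttm_buffer_object *bo)
    {
            return drm_vma_node_offset_addr(&bo->base.vma_node);
    }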
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h    2
-rw-r--r--  drivers/gpu/drm/drm_gem_vram_helper.c         2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c     2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c         2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.h              2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h        2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c                  8
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c             2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c               9
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.h          2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_prime.c        3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c            4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c       4
-rw-r--r--  include/drm/ttm/ttm_bo_api.h                  4
14 files changed, 21 insertions, 27 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 645a189d365c..113fb2feb437 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -191,7 +191,7 @@ static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
  */
 static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
 {
-	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
+	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
 }
 
 /**
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index fc13920b3cb4..fd751078bae1 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -168,7 +168,7 @@ EXPORT_SYMBOL(drm_gem_vram_put);
  */
 u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo)
 {
-	return drm_vma_node_offset_addr(&gbo->bo.vma_node);
+	return drm_vma_node_offset_addr(&gbo->bo.base.vma_node);
 }
 EXPORT_SYMBOL(drm_gem_vram_mmap_offset);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index fc8f5bb73ca8..98afc50162e9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -675,7 +675,7 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
 	gem = drm_gem_object_lookup(file_priv, handle);
 	if (gem) {
 		struct nouveau_bo *bo = nouveau_gem_object(gem);
-		*poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
+		*poffset = drm_vma_node_offset_addr(&bo->bo.base.vma_node);
 		drm_gem_object_put_unlocked(gem);
 		return 0;
 	}
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 2f484ab7dbca..b1e4852810ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -240,7 +240,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
 	}
 
 	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
-	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
+	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
 	rep->tile_mode = nvbo->mode;
 	rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
 	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index b812d4ae9d0d..8ae54ba7857c 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -60,7 +60,7 @@ static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
 
 static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
 {
-	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
+	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
 }
 
 static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 9ffd8215d38a..e5554bf9140e 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -116,7 +116,7 @@ static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
  */
 static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
 {
-	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
+	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
 }
 
 extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index ceff153f7e68..3e0a0cbc410e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -672,7 +672,7 @@ static void ttm_bo_release(struct kref *kref)
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 
-	drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
+	drm_vma_offset_remove(&bdev->vma_manager, &bo->base.vma_node);
 	ttm_mem_io_lock(man, false);
 	ttm_mem_io_free_vm(bo);
 	ttm_mem_io_unlock(man);
@@ -1343,9 +1343,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 		 * struct elements we want use regardless.
 		 */
 		reservation_object_init(&bo->base._resv);
+		drm_vma_node_reset(&bo->base.vma_node);
 	}
 	atomic_inc(&bo->bdev->glob->bo_count);
-	drm_vma_node_reset(&bo->vma_node);
 
 	/*
 	 * For ttm_bo_type_device buffers, allocate
@@ -1353,7 +1353,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	 */
 	if (bo->type == ttm_bo_type_device ||
 	    bo->type == ttm_bo_type_sg)
-		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->base.vma_node,
 					 bo->mem.num_pages);
 
 	/* passed reservation objects should already be locked,
@@ -1781,7 +1781,7 @@ void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 
-	drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
+	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
 	ttm_mem_io_free_vm(bo);
 }
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 05fbcaf6a3f2..f5009c1b6a9c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -510,7 +510,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
 	mutex_init(&fbo->base.wu_mutex);
 	fbo->base.moving = NULL;
-	drm_vma_node_reset(&fbo->base.vma_node);
+	drm_vma_node_reset(&fbo->base.base.vma_node);
 	atomic_set(&fbo->base.cpu_writers, 0);
 
 	kref_init(&fbo->base.list_kref);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 6dacff49c1cc..fb6875a789b7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -211,9 +211,9 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 	}
 
 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
-		vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
+		vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
 	page_last = vma_pages(vma) + vma->vm_pgoff -
-		drm_vma_node_start(&bo->vma_node);
+		drm_vma_node_start(&bo->base.vma_node);
 
 	if (unlikely(page_offset >= bo->num_pages)) {
 		ret = VM_FAULT_SIGBUS;
@@ -267,7 +267,7 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 		} else if (unlikely(!page)) {
 			break;
 		}
-		page->index = drm_vma_node_start(&bo->vma_node) +
+		page->index = drm_vma_node_start(&bo->base.vma_node) +
 				page_offset;
 		pfn = page_to_pfn(page);
 	}
@@ -413,7 +413,8 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
 
 	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
 	if (likely(node)) {
-		bo = container_of(node, struct ttm_buffer_object, vma_node);
+		bo = container_of(node, struct ttm_buffer_object,
+				  base.vma_node);
 		bo = ttm_bo_get_unless_zero(bo);
 	}
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index f4ecea6054ba..e28829661724 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -396,7 +396,7 @@ static inline void virtio_gpu_object_unref(struct virtio_gpu_object **bo)
 
 static inline u64 virtio_gpu_object_mmap_offset(struct virtio_gpu_object *bo)
 {
-	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
+	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
 }
 
 static inline int virtio_gpu_object_reserve(struct virtio_gpu_object *bo,
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index 8b3b2caf3364..dc642a884b88 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -68,8 +68,5 @@ void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
 int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
 			   struct vm_area_struct *vma)
 {
-	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
-
-	bo->gem_base.vma_node.vm_node.start = bo->tbo.vma_node.vm_node.start;
 	return drm_gem_prime_mmap(obj, vma);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 315da41a18b4..5739c6c49c99 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -835,7 +835,7 @@ int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
 		goto out_no_bo;
 
 	rep->handle = handle;
-	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.vma_node);
+	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
 	rep->cur_gmr_id = handle;
 	rep->cur_gmr_offset = 0;
 
@@ -1077,7 +1077,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
 	if (ret != 0)
 		return -EINVAL;
 
-	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
+	*offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
 	vmw_bo_unreference(&out_buf);
 	return 0;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 219471903bc1..3a6da3b66484 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1669,7 +1669,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
 	rep->backup_size = res->backup_size;
 	if (res->backup) {
 		rep->buffer_map_handle =
-			drm_vma_node_offset_addr(&res->backup->base.vma_node);
+			drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
 		rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
 		rep->buffer_handle = backup_handle;
 	} else {
@@ -1745,7 +1745,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
 	rep->crep.backup_size = srf->res.backup_size;
 	rep->crep.buffer_handle = backup_handle;
 	rep->crep.buffer_map_handle =
-		drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
+		drm_vma_node_offset_addr(&srf->res.backup->base.base.vma_node);
 	rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
 
 	rep->creq.version = drm_vmw_gb_surface_v1;
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index fa050f0328ab..7ffc50a3303d 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -152,7 +152,6 @@ struct ttm_tt;
  * @ddestroy: List head for the delayed destroy list.
  * @swap: List head for swap LRU list.
  * @moving: Fence set when BO is moving
- * @vma_node: Address space manager node.
  * @offset: The current GPU offset, which can have different meanings
  * depending on the memory type. For SYSTEM type memory, it should be 0.
  * @cur_placement: Hint of current placement.
@@ -219,9 +218,6 @@ struct ttm_buffer_object {
 	 */
 
 	struct dma_fence *moving;
-
-	struct drm_vma_offset_node vma_node;
-
 	unsigned priority;
 
 	/**