author		Christian König <christian.koenig@amd.com>	2014-08-07 03:36:00 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2014-08-11 11:04:23 -0400
commit		f72a113a71ab08c4df8a5f80ab2f8a140feb81f6 (patch)
tree		6d1453a5ffef6dfab8fdf1bc2e85b9abc531ed9a /drivers/gpu/drm
parent		7963e9db1b1f842fdc53309baa8714d38e9f5681 (diff)
drm/radeon: add userptr support v8
This patch adds an IOCTL for turning a pointer supplied by userspace into a buffer object.

It imposes several restrictions upon the memory being mapped:

1. It must be page aligned (both start/end addresses, i.e. ptr and size).

2. It must be normal system memory, not a pointer into another map of IO space (e.g. it must not be a GTT mmapping of another object).

3. The BO is mapped into GTT, so the maximum amount of memory mapped at all times is still the GTT limit.

4. The BO is only mapped readonly for now, so no write support.

5. List of backing pages is only acquired once, so they represent a snapshot of the first use.

Exporting and sharing as well as mapping of buffer objects created by this function is forbidden and results in -EPERM.

v2: squash all previous changes into first public version
v3: fix tabs, map readonly, don't use MM callback any more
v4: set TTM_PAGE_FLAG_SG so that TTM never messes with the pages,
    pin/unpin pages on bind/unbind instead of populate/unpopulate
v5: rebased on 3.17-wip, IOCTL renamed to userptr, reject any unknown flags,
    better handle READONLY flag, improve permission check
v6: fix ptr cast warning, use set_page_dirty/mark_page_accessed on unpin
v7: add warning about its availability in the API definition
v8: drop access_ok check, fix VM mapping bits

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com> (v4)
Reviewed-by: Jérôme Glisse <jglisse@redhat.com> (v4)
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
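For orientation, here is a minimal userspace sketch of how the new IOCTL is meant to be driven. It assumes the struct drm_radeon_gem_userptr and DRM_IOCTL_RADEON_GEM_USERPTR definitions from the uapi header extended alongside this series (not shown in the diffstat below, which is limited to drivers/gpu/drm); the render node path, buffer size and error handling are purely illustrative.

/* Minimal sketch: turn a page-aligned, read-only user buffer into a GTT BO. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/radeon_drm.h>	/* assumed install path of the radeon uapi header */

int main(void)
{
	size_t size = 1 << 20;				/* page aligned, as required */
	void *buf = NULL;
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* illustrative render node */

	if (fd < 0 || posix_memalign(&buf, sysconf(_SC_PAGESIZE), size))
		return 1;
	memset(buf, 0x42, size);			/* contents snapshotted on first use */

	struct drm_radeon_gem_userptr args;
	memset(&args, 0, sizeof(args));
	args.addr  = (uintptr_t)buf;			/* page-aligned start address */
	args.size  = size;				/* page-aligned size */
	args.flags = RADEON_GEM_USERPTR_READONLY;	/* the only mode accepted so far */

	if (ioctl(fd, DRM_IOCTL_RADEON_GEM_USERPTR, &args)) {
		perror("RADEON_GEM_USERPTR");		/* -EACCES/-EINVAL/-EPERM per the checks below */
		return 1;
	}
	printf("userptr BO handle: %u\n", args.handle);

	/* args.handle can now be used in command submission (GTT domain only),
	 * but mmap, pin and dma-buf export of it are rejected with -EPERM. */
	close(fd);
	return 0;
}

The returned handle behaves like any other GEM handle in command submission; only the export, pin and CPU-mapping paths treat userptr BOs specially, as the diff below shows.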
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--	drivers/gpu/drm/radeon/radeon.h		6
-rw-r--r--	drivers/gpu/drm/radeon/radeon_cs.c	25
-rw-r--r--	drivers/gpu/drm/radeon/radeon_drv.c	5
-rw-r--r--	drivers/gpu/drm/radeon/radeon_gem.c	68
-rw-r--r--	drivers/gpu/drm/radeon/radeon_kms.c	1
-rw-r--r--	drivers/gpu/drm/radeon/radeon_object.c	3
-rw-r--r--	drivers/gpu/drm/radeon/radeon_prime.c	10
-rw-r--r--	drivers/gpu/drm/radeon/radeon_ttm.c	145
-rw-r--r--	drivers/gpu/drm/radeon/radeon_vm.c	3
9 files changed, 263 insertions, 3 deletions
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 9e1732eb402c..6f38a23a5810 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2138,6 +2138,8 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *filp);
 int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *filp);
+int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *filp);
 int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
 int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
@@ -2871,6 +2873,10 @@ extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enabl
 extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
 extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
 extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
+extern int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
+				     uint32_t flags);
+extern bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm);
+extern bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm);
 extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
 extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
 extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index ee712c199b25..1321491cf499 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -78,7 +78,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 	struct radeon_cs_chunk *chunk;
 	struct radeon_cs_buckets buckets;
 	unsigned i, j;
-	bool duplicate;
+	bool duplicate, need_mmap_lock = false;
+	int r;
 
 	if (p->chunk_relocs_idx == -1) {
 		return 0;
@@ -164,6 +165,19 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 			p->relocs[i].allowed_domains = domain;
 		}
 
+		if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
+			uint32_t domain = p->relocs[i].prefered_domains;
+			if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
+				DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
+					  "allowed for userptr BOs\n");
+				return -EINVAL;
+			}
+			need_mmap_lock = true;
+			domain = RADEON_GEM_DOMAIN_GTT;
+			p->relocs[i].prefered_domains = domain;
+			p->relocs[i].allowed_domains = domain;
+		}
+
 		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
 		p->relocs[i].handle = r->handle;
 
@@ -176,8 +190,15 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 	if (p->cs_flags & RADEON_CS_USE_VM)
 		p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
 					      &p->validated);
+	if (need_mmap_lock)
+		down_read(&current->mm->mmap_sem);
+
+	r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
 
-	return radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
+	if (need_mmap_lock)
+		up_read(&current->mm->mmap_sem);
+
+	return r;
 }
 
 static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index a773830c6c40..5b18af926527 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -114,6 +114,9 @@ int radeon_gem_object_open(struct drm_gem_object *obj,
 			   struct drm_file *file_priv);
 void radeon_gem_object_close(struct drm_gem_object *obj,
 				struct drm_file *file_priv);
+struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
+					struct drm_gem_object *gobj,
+					int flags);
 extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
 				      unsigned int flags,
 				      int *vpos, int *hpos, ktime_t *stime,
@@ -568,7 +571,7 @@ static struct drm_driver kms_driver = {
 
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-	.gem_prime_export = drm_gem_prime_export,
+	.gem_prime_export = radeon_gem_prime_export,
 	.gem_prime_import = drm_gem_prime_import,
 	.gem_prime_pin = radeon_gem_prime_pin,
 	.gem_prime_unpin = radeon_gem_prime_unpin,
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index bfd7e1b0ff3f..993ab223b503 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -272,6 +272,65 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
+int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *filp)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_radeon_gem_userptr *args = data;
+	struct drm_gem_object *gobj;
+	struct radeon_bo *bo;
+	uint32_t handle;
+	int r;
+
+	if (offset_in_page(args->addr | args->size))
+		return -EINVAL;
+
+	/* we only support read only mappings for now */
+	if (!(args->flags & RADEON_GEM_USERPTR_READONLY))
+		return -EACCES;
+
+	/* reject unknown flag values */
+	if (args->flags & ~RADEON_GEM_USERPTR_READONLY)
+		return -EINVAL;
+
+	/* readonly pages not tested on older hardware */
+	if (rdev->family < CHIP_R600)
+		return -EINVAL;
+
+	down_read(&rdev->exclusive_lock);
+
+	/* create a gem object to contain this object in */
+	r = radeon_gem_object_create(rdev, args->size, 0,
+				     RADEON_GEM_DOMAIN_CPU, 0,
+				     false, &gobj);
+	if (r)
+		goto handle_lockup;
+
+	bo = gem_to_radeon_bo(gobj);
+	r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
+	if (r)
+		goto release_object;
+
+	r = drm_gem_handle_create(filp, gobj, &handle);
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference_unlocked(gobj);
+	if (r)
+		goto handle_lockup;
+
+	args->handle = handle;
+	up_read(&rdev->exclusive_lock);
+	return 0;
+
+release_object:
+	drm_gem_object_unreference_unlocked(gobj);
+
+handle_lockup:
+	up_read(&rdev->exclusive_lock);
+	r = radeon_gem_handle_lockup(rdev, r);
+
+	return r;
+}
+
 int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *filp)
 {
@@ -315,6 +374,10 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
 		return -ENOENT;
 	}
 	robj = gem_to_radeon_bo(gobj);
+	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
+		drm_gem_object_unreference_unlocked(gobj);
+		return -EPERM;
+	}
 	*offset_p = radeon_bo_mmap_offset(robj);
 	drm_gem_object_unreference_unlocked(gobj);
 	return 0;
@@ -532,6 +595,11 @@ int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	}
 	robj = gem_to_radeon_bo(gobj);
+
+	r = -EPERM;
+	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
+		goto out;
+
 	r = radeon_bo_reserve(robj, false);
 	if (unlikely(r))
 		goto out;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index eb7164d07985..8309b11e674d 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -885,5 +885,6 @@ const struct drm_ioctl_desc radeon_ioctls_kms[] = {
 	DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
 };
 int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 480c87d8edc5..c73c1e320585 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -264,6 +264,9 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
 {
 	int r, i;
 
+	if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
+		return -EPERM;
+
 	if (bo->pin_count) {
 		bo->pin_count++;
 		if (gpu_addr)
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index f7e48d329db3..bb18bc74b7d7 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -103,3 +103,13 @@ void radeon_gem_prime_unpin(struct drm_gem_object *obj)
 	radeon_bo_unpin(bo);
 	radeon_bo_unreserve(bo);
 }
+
+struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
+					struct drm_gem_object *gobj,
+					int flags)
+{
+	struct radeon_bo *bo = gem_to_radeon_bo(gobj);
+	if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
+		return ERR_PTR(-EPERM);
+	return drm_gem_prime_export(dev, gobj, flags);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 72afe82a95c9..b20933fa35c6 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -39,6 +39,8 @@
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/swiotlb.h>
+#include <linux/swap.h>
+#include <linux/pagemap.h>
 #include <linux/debugfs.h>
 #include "radeon_reg.h"
 #include "radeon.h"
@@ -515,8 +517,92 @@ struct radeon_ttm_tt {
 	struct ttm_dma_tt		ttm;
 	struct radeon_device		*rdev;
 	u64				offset;
+
+	uint64_t			userptr;
+	struct mm_struct		*usermm;
+	uint32_t			userflags;
 };
 
+/* prepare the sg table with the user pages */
+static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
+{
+	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+	unsigned pinned = 0, nents;
+	int r;
+
+	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
+	enum dma_data_direction direction = write ?
+		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
+	if (current->mm != gtt->usermm)
+		return -EPERM;
+
+	do {
+		unsigned num_pages = ttm->num_pages - pinned;
+		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
+		struct page **pages = ttm->pages + pinned;
+
+		r = get_user_pages(current, current->mm, userptr, num_pages,
+				   write, 0, pages, NULL);
+		if (r < 0)
+			goto release_pages;
+
+		pinned += r;
+
+	} while (pinned < ttm->num_pages);
+
+	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
+				      ttm->num_pages << PAGE_SHIFT,
+				      GFP_KERNEL);
+	if (r)
+		goto release_sg;
+
+	r = -ENOMEM;
+	nents = dma_map_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
+	if (nents != ttm->sg->nents)
+		goto release_sg;
+
+	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
+					 gtt->ttm.dma_address, ttm->num_pages);
+
+	return 0;
+
+release_sg:
+	kfree(ttm->sg);
+
+release_pages:
+	release_pages(ttm->pages, pinned, 0);
+	return r;
+}
+
+static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
+{
+	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+	struct scatterlist *sg;
+	int i;
+
+	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
+	enum dma_data_direction direction = write ?
+		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
+	/* free the sg table and pages again */
+	dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
+
+	for_each_sg(ttm->sg->sgl, sg, ttm->sg->nents, i) {
+		struct page *page = sg_page(sg);
+
+		if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
+			set_page_dirty(page);
+
+		mark_page_accessed(page);
+		page_cache_release(page);
+	}
+
+	sg_free_table(ttm->sg);
+}
+
 static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
 				   struct ttm_mem_reg *bo_mem)
 {
@@ -525,6 +611,11 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
 		RADEON_GART_PAGE_WRITE;
 	int r;
 
+	if (gtt->userptr) {
+		radeon_ttm_tt_pin_userptr(ttm);
+		flags &= ~RADEON_GART_PAGE_WRITE;
+	}
+
 	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
 	if (!ttm->num_pages) {
 		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
@@ -547,6 +638,10 @@ static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
 	struct radeon_ttm_tt *gtt = (void *)ttm;
 
 	radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
+
+	if (gtt->userptr)
+		radeon_ttm_tt_unpin_userptr(ttm);
+
 	return 0;
 }
 
@@ -603,6 +698,16 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
 	if (ttm->state != tt_unpopulated)
 		return 0;
 
+	if (gtt->userptr) {
+		ttm->sg = kcalloc(1, sizeof(struct sg_table), GFP_KERNEL);
+		if (!ttm->sg)
+			return -ENOMEM;
+
+		ttm->page_flags |= TTM_PAGE_FLAG_SG;
+		ttm->state = tt_unbound;
+		return 0;
+	}
+
 	if (slave && ttm->sg) {
 		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
 						 gtt->ttm.dma_address, ttm->num_pages);
@@ -652,6 +757,12 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	unsigned i;
 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
 
+	if (gtt->userptr) {
+		kfree(ttm->sg);
+		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
+		return;
+	}
+
 	if (slave)
 		return;
 
@@ -680,6 +791,40 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	ttm_pool_unpopulate(ttm);
 }
 
+int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
+			      uint32_t flags)
+{
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+
+	if (gtt == NULL)
+		return -EINVAL;
+
+	gtt->userptr = addr;
+	gtt->usermm = current->mm;
+	gtt->userflags = flags;
+	return 0;
+}
+
+bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm)
+{
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+
+	if (gtt == NULL)
+		return false;
+
+	return !!gtt->userptr;
+}
+
+bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm)
+{
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+
+	if (gtt == NULL)
+		return false;
+
+	return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
+}
+
 static struct ttm_bo_driver radeon_bo_driver = {
 	.ttm_tt_create = &radeon_ttm_tt_create,
 	.ttm_tt_populate = &radeon_ttm_tt_populate,
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index ccae4d9dc3de..0e107c5650bf 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -888,6 +888,9 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
 	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
 	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
 	bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED;
+	if (bo_va->bo && radeon_ttm_tt_is_readonly(bo_va->bo->tbo.ttm))
+		bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE;
+
 	if (mem) {
 		addr = mem->start << PAGE_SHIFT;
 		if (mem->mem_type != TTM_PL_SYSTEM) {